#include "qemu/osdep.h"
#include "target/arm/idau.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "arm_ldst.h"
#include <zlib.h> /* For crc32 */
#include "exec/semihost.h"
#include "sysemu/kvm.h"
#include "fpu/softfloat.h"
#include "qemu/range.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

#ifndef CONFIG_USER_ONLY
/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;

static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);

static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs);
#endif

static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian. */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stq_le_p(buf, *aa32_vfp_dreg(env, reg));
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs. */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            stq_le_p(buf, q[0]);
            stq_le_p(buf + 8, q[1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}

static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}
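
/* Explanatory note (added comment): in the two gdb accessors above, registers
 * 0..nregs-1 are the D registers (16, or 32 with VFP3), followed by 16
 * quadword aliases of the Q registers when NEON is present, and finally
 * FPSID, FPSCR and FPEXC.
 */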

static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        {
            uint64_t *q = aa64_vfp_qreg(env, reg);
            stq_le_p(buf, q[0]);
            stq_le_p(buf + 8, q[1]);
            return 16;
        }
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        {
            uint64_t *q = aa64_vfp_qreg(env, reg);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}

static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    const ARMCPRegInfo *ri;
    uint32_t key;

    key = cpu->dyn_xml.cpregs_keys[reg];
    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    if (ri) {
        if (cpreg_field_is_64bit(ri)) {
            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
        } else {
            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
        }
    }
    return 0;
}

static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    return 0;
}

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

bool write_cpustate_to_list(ARMCPU *cpu)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (env->cp15.hcr_el2 & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (env->cp15.hcr_el2 & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (env->cp15.hcr_el2 & HCR_TGE);

    if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush(CPU(cpu));
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush(CPU(cpu));
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_S12NSE1 |
                        ARMMMUIdxBit_S12NSE0 |
                        ARMMMUIdxBit_S2NS);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_S12NSE1 |
                                        ARMMMUIdxBit_S12NSE0 |
                                        ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }
    env->cp15.cpacr_el1 = value;
}
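
/* Explanatory note (added comment): with VFP present the writable mask built
 * above is (1 << 31) | (1 << 30) | (0xf << 20) == 0xc0f00000, i.e. ASEDIS,
 * D32DIS and the cp10/cp11 access fields; everything else is forced to zero
 * on pre-v8 cores.
 */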

static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write },
    REGINFO_SENTINEL
};

/* Definitions for the PMU registers */
#define PMCRN_MASK 0xf800
#define PMCRN_SHIFT 11
#define PMCRD 0x8
#define PMCRC 0x4
#define PMCRE 0x1

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}
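
/* Explanatory note (added comment): pmu_counter_mask() below evaluates to
 * bit 31 (the cycle counter) plus one bit per implemented event counter;
 * e.g. with PMCR.N == 4 it yields 0x8000000f.
 */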

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
}

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

#ifndef CONFIG_USER_ONLY

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
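
/* Explanatory note (added comment): while the cycle counter is enabled,
 * env->cp15.c15_ccnt stores (elapsed_ticks - counter_value) rather than the
 * counter value itself, so pmccntr_read() below returns ticks - c15_ccnt.
 * pmccntr_sync() converts between the two representations and is therefore
 * called in pairs around anything that changes how the counter ticks
 * (see pmcr_write() and pmccfiltr_write()).
 */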

static inline bool arm_ccnt_enabled(CPUARMState *env)
{
    /* This does not support checking PMCCFILTR_EL0 register */

    if (!(env->cp15.c9_pmcr & PMCRE) || !(env->cp15.c9_pmcnten & (1 << 31))) {
        return false;
    }

    return true;
}

void pmccntr_sync(CPUARMState *env)
{
    uint64_t temp_ticks;

    temp_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                          ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        temp_ticks /= 64;
    }

    if (arm_ccnt_enabled(env)) {
        env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
    }
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmccntr_sync(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmccntr_sync(env);
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, do not change value */
        return env->cp15.c15_ccnt;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    return total_ticks - env->cp15.c15_ccnt;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, set the absolute value */
        env->cp15.c15_ccnt = value;
        return;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    env->cp15.c15_ccnt = total_ticks - value;
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

#else /* CONFIG_USER_ONLY */

void pmccntr_sync(CPUARMState *env)
{
}

#endif

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_sync(env);
    env->cp15.pmccfiltr_el0 = value & 0xfc000000;
    pmccntr_sync(env);
}

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
    if (env->cp15.c9_pmselr == 0x1f) {
        pmccfiltr_write(env, ri, value);
    }
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
     * are CONSTRAINED UNPREDICTABLE. See comments in pmxevtyper_write().
     */
    if (env->cp15.c9_pmselr == 0x1f) {
        return env->cp15.pmccfiltr_el0;
    } else {
        return 0;
    }
}

static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
}

static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}

static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* We only mask off bits that are RES0 both for AArch64 and AArch32.
     * For bits that vary between AArch32/64, code needs to check the
     * current execution mode before directly using the feature bit.
     */
    uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits. */
    value &= valid_mask;
    raw_write(env, ri, value);
}

static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}

static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t ret = 0;

    if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
        ret |= CPSR_I;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
        ret |= CPSR_F;
    }
    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}

static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow (although we don't actually implement any counters)
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    /* Unimplemented so WI. */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc, .type = ARM_CP_NOP },
#ifndef CONFIG_USER_ONLY
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write},
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write, },
#endif
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    /* Unimplemented, RAZ/WI. */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access_xevcntr },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};

static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);

    switch (el) {
    case 0:
        if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 0, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
     * EL0[PV]TEN is zero.
     */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 1, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}
1701 */ 1702 switch (arm_current_el(env)) { 1703 case 1: 1704 if (!arm_is_secure(env)) { 1705 return CP_ACCESS_TRAP; 1706 } 1707 if (!(env->cp15.scr_el3 & SCR_ST)) { 1708 return CP_ACCESS_TRAP_EL3; 1709 } 1710 return CP_ACCESS_OK; 1711 case 0: 1712 case 2: 1713 return CP_ACCESS_TRAP; 1714 case 3: 1715 return CP_ACCESS_OK; 1716 default: 1717 g_assert_not_reached(); 1718 } 1719 } 1720 1721 static uint64_t gt_get_countervalue(CPUARMState *env) 1722 { 1723 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE; 1724 } 1725 1726 static void gt_recalc_timer(ARMCPU *cpu, int timeridx) 1727 { 1728 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx]; 1729 1730 if (gt->ctl & 1) { 1731 /* Timer enabled: calculate and set current ISTATUS, irq, and 1732 * reset timer to when ISTATUS next has to change 1733 */ 1734 uint64_t offset = timeridx == GTIMER_VIRT ? 1735 cpu->env.cp15.cntvoff_el2 : 0; 1736 uint64_t count = gt_get_countervalue(&cpu->env); 1737 /* Note that this must be unsigned 64 bit arithmetic: */ 1738 int istatus = count - offset >= gt->cval; 1739 uint64_t nexttick; 1740 int irqstate; 1741 1742 gt->ctl = deposit32(gt->ctl, 2, 1, istatus); 1743 1744 irqstate = (istatus && !(gt->ctl & 2)); 1745 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); 1746 1747 if (istatus) { 1748 /* Next transition is when count rolls back over to zero */ 1749 nexttick = UINT64_MAX; 1750 } else { 1751 /* Next transition is when we hit cval */ 1752 nexttick = gt->cval + offset; 1753 } 1754 /* Note that the desired next expiry time might be beyond the 1755 * signed-64-bit range of a QEMUTimer -- in this case we just 1756 * set the timer for as far in the future as possible. When the 1757 * timer expires we will reset the timer for any remaining period. 1758 */ 1759 if (nexttick > INT64_MAX / GTIMER_SCALE) { 1760 nexttick = INT64_MAX / GTIMER_SCALE; 1761 } 1762 timer_mod(cpu->gt_timer[timeridx], nexttick); 1763 trace_arm_gt_recalc(timeridx, irqstate, nexttick); 1764 } else { 1765 /* Timer disabled: ISTATUS and timer output always clear */ 1766 gt->ctl &= ~4; 1767 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0); 1768 timer_del(cpu->gt_timer[timeridx]); 1769 trace_arm_gt_recalc_disabled(timeridx); 1770 } 1771 } 1772 1773 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri, 1774 int timeridx) 1775 { 1776 ARMCPU *cpu = arm_env_get_cpu(env); 1777 1778 timer_del(cpu->gt_timer[timeridx]); 1779 } 1780 1781 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 1782 { 1783 return gt_get_countervalue(env); 1784 } 1785 1786 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 1787 { 1788 return gt_get_countervalue(env) - env->cp15.cntvoff_el2; 1789 } 1790 1791 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1792 int timeridx, 1793 uint64_t value) 1794 { 1795 trace_arm_gt_cval_write(timeridx, value); 1796 env->cp15.c14_timer[timeridx].cval = value; 1797 gt_recalc_timer(arm_env_get_cpu(env), timeridx); 1798 } 1799 1800 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri, 1801 int timeridx) 1802 { 1803 uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0; 1804 1805 return (uint32_t)(env->cp15.c14_timer[timeridx].cval - 1806 (gt_get_countervalue(env) - offset)); 1807 } 1808 1809 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1810 int timeridx, 1811 uint64_t value) 1812 { 1813 uint64_t offset = timeridx == GTIMER_VIRT ? 
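/* Editor's note (illustrative sketch, not part of the original source):
 * TVAL is a signed 32-bit "downcounter" view of the 64-bit comparator.
 * With CNT = gt_get_countervalue() and OFF the per-timer offset used in
 * gt_tval_read()/gt_tval_write():
 *
 *   read:   TVAL = (uint32_t)(CVAL - (CNT - OFF))
 *   write:  CVAL = (CNT - OFF) + sextract64(TVAL, 0, 32)
 *
 * For example, writing TVAL = 100 arms the timer to fire 100 ticks of
 * QEMU_CLOCK_VIRTUAL / GTIMER_SCALE from now; reading TVAL back a moment
 * later returns a slightly smaller value, and once the deadline has
 * passed the value read goes negative when interpreted as int32_t.
 */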
env->cp15.cntvoff_el2 : 0; 1814 1815 trace_arm_gt_tval_write(timeridx, value); 1816 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset + 1817 sextract64(value, 0, 32); 1818 gt_recalc_timer(arm_env_get_cpu(env), timeridx); 1819 } 1820 1821 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 1822 int timeridx, 1823 uint64_t value) 1824 { 1825 ARMCPU *cpu = arm_env_get_cpu(env); 1826 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; 1827 1828 trace_arm_gt_ctl_write(timeridx, value); 1829 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value); 1830 if ((oldval ^ value) & 1) { 1831 /* Enable toggled */ 1832 gt_recalc_timer(cpu, timeridx); 1833 } else if ((oldval ^ value) & 2) { 1834 /* IMASK toggled: don't need to recalculate, 1835 * just set the interrupt line based on ISTATUS 1836 */ 1837 int irqstate = (oldval & 4) && !(value & 2); 1838 1839 trace_arm_gt_imask_toggle(timeridx, irqstate); 1840 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); 1841 } 1842 } 1843 1844 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 1845 { 1846 gt_timer_reset(env, ri, GTIMER_PHYS); 1847 } 1848 1849 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1850 uint64_t value) 1851 { 1852 gt_cval_write(env, ri, GTIMER_PHYS, value); 1853 } 1854 1855 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 1856 { 1857 return gt_tval_read(env, ri, GTIMER_PHYS); 1858 } 1859 1860 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1861 uint64_t value) 1862 { 1863 gt_tval_write(env, ri, GTIMER_PHYS, value); 1864 } 1865 1866 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 1867 uint64_t value) 1868 { 1869 gt_ctl_write(env, ri, GTIMER_PHYS, value); 1870 } 1871 1872 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 1873 { 1874 gt_timer_reset(env, ri, GTIMER_VIRT); 1875 } 1876 1877 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1878 uint64_t value) 1879 { 1880 gt_cval_write(env, ri, GTIMER_VIRT, value); 1881 } 1882 1883 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 1884 { 1885 return gt_tval_read(env, ri, GTIMER_VIRT); 1886 } 1887 1888 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1889 uint64_t value) 1890 { 1891 gt_tval_write(env, ri, GTIMER_VIRT, value); 1892 } 1893 1894 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 1895 uint64_t value) 1896 { 1897 gt_ctl_write(env, ri, GTIMER_VIRT, value); 1898 } 1899 1900 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri, 1901 uint64_t value) 1902 { 1903 ARMCPU *cpu = arm_env_get_cpu(env); 1904 1905 trace_arm_gt_cntvoff_write(value); 1906 raw_write(env, ri, value); 1907 gt_recalc_timer(cpu, GTIMER_VIRT); 1908 } 1909 1910 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 1911 { 1912 gt_timer_reset(env, ri, GTIMER_HYP); 1913 } 1914 1915 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1916 uint64_t value) 1917 { 1918 gt_cval_write(env, ri, GTIMER_HYP, value); 1919 } 1920 1921 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 1922 { 1923 return gt_tval_read(env, ri, GTIMER_HYP); 1924 } 1925 1926 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1927 uint64_t value) 1928 { 1929 gt_tval_write(env, ri, GTIMER_HYP, value); 1930 } 1931 1932 static void gt_hyp_ctl_write(CPUARMState *env, const 
ARMCPRegInfo *ri, 1933 uint64_t value) 1934 { 1935 gt_ctl_write(env, ri, GTIMER_HYP, value); 1936 } 1937 1938 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 1939 { 1940 gt_timer_reset(env, ri, GTIMER_SEC); 1941 } 1942 1943 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1944 uint64_t value) 1945 { 1946 gt_cval_write(env, ri, GTIMER_SEC, value); 1947 } 1948 1949 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 1950 { 1951 return gt_tval_read(env, ri, GTIMER_SEC); 1952 } 1953 1954 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1955 uint64_t value) 1956 { 1957 gt_tval_write(env, ri, GTIMER_SEC, value); 1958 } 1959 1960 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 1961 uint64_t value) 1962 { 1963 gt_ctl_write(env, ri, GTIMER_SEC, value); 1964 } 1965 1966 void arm_gt_ptimer_cb(void *opaque) 1967 { 1968 ARMCPU *cpu = opaque; 1969 1970 gt_recalc_timer(cpu, GTIMER_PHYS); 1971 } 1972 1973 void arm_gt_vtimer_cb(void *opaque) 1974 { 1975 ARMCPU *cpu = opaque; 1976 1977 gt_recalc_timer(cpu, GTIMER_VIRT); 1978 } 1979 1980 void arm_gt_htimer_cb(void *opaque) 1981 { 1982 ARMCPU *cpu = opaque; 1983 1984 gt_recalc_timer(cpu, GTIMER_HYP); 1985 } 1986 1987 void arm_gt_stimer_cb(void *opaque) 1988 { 1989 ARMCPU *cpu = opaque; 1990 1991 gt_recalc_timer(cpu, GTIMER_SEC); 1992 } 1993 1994 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 1995 /* Note that CNTFRQ is purely reads-as-written for the benefit 1996 * of software; writing it doesn't actually change the timer frequency. 1997 * Our reset value matches the fixed frequency we implement the timer at. 1998 */ 1999 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0, 2000 .type = ARM_CP_ALIAS, 2001 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 2002 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq), 2003 }, 2004 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, 2005 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, 2006 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 2007 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), 2008 .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE, 2009 }, 2010 /* overall control: mostly access permissions */ 2011 { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH, 2012 .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0, 2013 .access = PL1_RW, 2014 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl), 2015 .resetvalue = 0, 2016 }, 2017 /* per-timer control */ 2018 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 2019 .secure = ARM_CP_SECSTATE_NS, 2020 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R, 2021 .accessfn = gt_ptimer_access, 2022 .fieldoffset = offsetoflow32(CPUARMState, 2023 cp15.c14_timer[GTIMER_PHYS].ctl), 2024 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write, 2025 }, 2026 { .name = "CNTP_CTL_S", 2027 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 2028 .secure = ARM_CP_SECSTATE_S, 2029 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R, 2030 .accessfn = gt_ptimer_access, 2031 .fieldoffset = offsetoflow32(CPUARMState, 2032 cp15.c14_timer[GTIMER_SEC].ctl), 2033 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 2034 }, 2035 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64, 2036 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1, 2037 .type = ARM_CP_IO, .access = PL1_RW | PL0_R, 2038 .accessfn = gt_ptimer_access, 2039 .fieldoffset = offsetof(CPUARMState, 
cp15.c14_timer[GTIMER_PHYS].ctl), 2040 .resetvalue = 0, 2041 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write, 2042 }, 2043 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1, 2044 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R, 2045 .accessfn = gt_vtimer_access, 2046 .fieldoffset = offsetoflow32(CPUARMState, 2047 cp15.c14_timer[GTIMER_VIRT].ctl), 2048 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write, 2049 }, 2050 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64, 2051 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1, 2052 .type = ARM_CP_IO, .access = PL1_RW | PL0_R, 2053 .accessfn = gt_vtimer_access, 2054 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), 2055 .resetvalue = 0, 2056 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write, 2057 }, 2058 /* TimerValue views: a 32 bit downcounting view of the underlying state */ 2059 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 2060 .secure = ARM_CP_SECSTATE_NS, 2061 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2062 .accessfn = gt_ptimer_access, 2063 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write, 2064 }, 2065 { .name = "CNTP_TVAL_S", 2066 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 2067 .secure = ARM_CP_SECSTATE_S, 2068 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2069 .accessfn = gt_ptimer_access, 2070 .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write, 2071 }, 2072 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64, 2073 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0, 2074 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2075 .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset, 2076 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write, 2077 }, 2078 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0, 2079 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2080 .accessfn = gt_vtimer_access, 2081 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write, 2082 }, 2083 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64, 2084 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0, 2085 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2086 .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset, 2087 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write, 2088 }, 2089 /* The counter itself */ 2090 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0, 2091 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 2092 .accessfn = gt_pct_access, 2093 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore, 2094 }, 2095 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64, 2096 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1, 2097 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2098 .accessfn = gt_pct_access, .readfn = gt_cnt_read, 2099 }, 2100 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1, 2101 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 2102 .accessfn = gt_vct_access, 2103 .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore, 2104 }, 2105 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, 2106 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, 2107 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2108 .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read, 2109 }, 2110 /* Comparison value, indicating when the timer goes off */ 2111 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2, 2112 .secure = ARM_CP_SECSTATE_NS, 2113 .access = PL1_RW | 
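/* Editor's note (illustrative example, not part of the original source):
 * with the regdefs in this table a bare-metal AArch64 guest would
 * typically arm the EL1 physical timer like this (sketch only):
 *
 *   mrs  x0, cntpct_el0          // current count (gt_cnt_read)
 *   add  x0, x0, #10000          // deadline 10000 ticks from now
 *   msr  cntp_cval_el0, x0       // gt_phys_cval_write
 *   mov  x0, #1                  // CTL: ENABLE=1, IMASK=0
 *   msr  cntp_ctl_el0, x0        // gt_phys_ctl_write -> gt_recalc_timer
 *
 * gt_recalc_timer() then computes ISTATUS and schedules the QEMUTimer
 * that raises the timer interrupt output when the deadline is reached.
 */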
PL0_R, 2114 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 2115 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 2116 .accessfn = gt_ptimer_access, 2117 .writefn = gt_phys_cval_write, .raw_writefn = raw_write, 2118 }, 2119 { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2, 2120 .secure = ARM_CP_SECSTATE_S, 2121 .access = PL1_RW | PL0_R, 2122 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 2123 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), 2124 .accessfn = gt_ptimer_access, 2125 .writefn = gt_sec_cval_write, .raw_writefn = raw_write, 2126 }, 2127 { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64, 2128 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2, 2129 .access = PL1_RW | PL0_R, 2130 .type = ARM_CP_IO, 2131 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 2132 .resetvalue = 0, .accessfn = gt_ptimer_access, 2133 .writefn = gt_phys_cval_write, .raw_writefn = raw_write, 2134 }, 2135 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3, 2136 .access = PL1_RW | PL0_R, 2137 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 2138 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 2139 .accessfn = gt_vtimer_access, 2140 .writefn = gt_virt_cval_write, .raw_writefn = raw_write, 2141 }, 2142 { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64, 2143 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2, 2144 .access = PL1_RW | PL0_R, 2145 .type = ARM_CP_IO, 2146 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 2147 .resetvalue = 0, .accessfn = gt_vtimer_access, 2148 .writefn = gt_virt_cval_write, .raw_writefn = raw_write, 2149 }, 2150 /* Secure timer -- this is actually restricted to only EL3 2151 * and configurably Secure-EL1 via the accessfn. 2152 */ 2153 { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64, 2154 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0, 2155 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW, 2156 .accessfn = gt_stimer_access, 2157 .readfn = gt_sec_tval_read, 2158 .writefn = gt_sec_tval_write, 2159 .resetfn = gt_sec_timer_reset, 2160 }, 2161 { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64, 2162 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1, 2163 .type = ARM_CP_IO, .access = PL1_RW, 2164 .accessfn = gt_stimer_access, 2165 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl), 2166 .resetvalue = 0, 2167 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 2168 }, 2169 { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64, 2170 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2, 2171 .type = ARM_CP_IO, .access = PL1_RW, 2172 .accessfn = gt_stimer_access, 2173 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), 2174 .writefn = gt_sec_cval_write, .raw_writefn = raw_write, 2175 }, 2176 REGINFO_SENTINEL 2177 }; 2178 2179 #else 2180 2181 /* In user-mode most of the generic timer registers are inaccessible 2182 * however modern kernels (4.12+) allow access to cntvct_el0 2183 */ 2184 2185 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 2186 { 2187 /* Currently we have no support for QEMUTimer in linux-user so we 2188 * can't call gt_get_countervalue(env), instead we directly 2189 * call the lower level functions. 
2190 */ 2191 return cpu_get_clock() / GTIMER_SCALE; 2192 } 2193 2194 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 2195 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, 2196 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, 2197 .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */, 2198 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), 2199 .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE, 2200 }, 2201 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, 2202 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, 2203 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2204 .readfn = gt_virt_cnt_read, 2205 }, 2206 REGINFO_SENTINEL 2207 }; 2208 2209 #endif 2210 2211 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 2212 { 2213 if (arm_feature(env, ARM_FEATURE_LPAE)) { 2214 raw_write(env, ri, value); 2215 } else if (arm_feature(env, ARM_FEATURE_V7)) { 2216 raw_write(env, ri, value & 0xfffff6ff); 2217 } else { 2218 raw_write(env, ri, value & 0xfffff1ff); 2219 } 2220 } 2221 2222 #ifndef CONFIG_USER_ONLY 2223 /* get_phys_addr() isn't present for user-mode-only targets */ 2224 2225 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri, 2226 bool isread) 2227 { 2228 if (ri->opc2 & 4) { 2229 /* The ATS12NSO* operations must trap to EL3 if executed in 2230 * Secure EL1 (which can only happen if EL3 is AArch64). 2231 * They are simply UNDEF if executed from NS EL1. 2232 * They function normally from EL2 or EL3. 2233 */ 2234 if (arm_current_el(env) == 1) { 2235 if (arm_is_secure_below_el3(env)) { 2236 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3; 2237 } 2238 return CP_ACCESS_TRAP_UNCATEGORIZED; 2239 } 2240 } 2241 return CP_ACCESS_OK; 2242 } 2243 2244 static uint64_t do_ats_write(CPUARMState *env, uint64_t value, 2245 MMUAccessType access_type, ARMMMUIdx mmu_idx) 2246 { 2247 hwaddr phys_addr; 2248 target_ulong page_size; 2249 int prot; 2250 bool ret; 2251 uint64_t par64; 2252 bool format64 = false; 2253 MemTxAttrs attrs = {}; 2254 ARMMMUFaultInfo fi = {}; 2255 ARMCacheAttrs cacheattrs = {}; 2256 2257 ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs, 2258 &prot, &page_size, &fi, &cacheattrs); 2259 2260 if (is_a64(env)) { 2261 format64 = true; 2262 } else if (arm_feature(env, ARM_FEATURE_LPAE)) { 2263 /* 2264 * ATS1Cxx: 2265 * * TTBCR.EAE determines whether the result is returned using the 2266 * 32-bit or the 64-bit PAR format 2267 * * Instructions executed in Hyp mode always use the 64bit format 2268 * 2269 * ATS1S2NSOxx uses the 64bit format if any of the following is true: 2270 * * The Non-secure TTBCR.EAE bit is set to 1 2271 * * The implementation includes EL2, and the value of HCR.VM is 1 2272 * 2273 * ATS1Hx always uses the 64bit format (not supported yet). 
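 *
 * Editor's note (illustrative aside, not part of the original source):
 * the 64-bit PAR assembled below packs its fields like this on a
 * successful translation:
 *
 *   [63:56] ATTR - MAIR-format memory attributes (cacheattrs.attrs)
 *   [47:12] PA   - output address (phys_addr with the low 12 bits clear)
 *   [11]    LPAE - always 1 for the long-descriptor format
 *   [9]     NS   - set when the output address is Non-secure
 *   [8:7]   SH   - shareability (cacheattrs.shareability)
 *   [0]     F    - 0, no fault
 *
 * and on a fault only F = 1 plus the long-descriptor fault status code
 * in bits [6:1] are reported.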
2274 */ 2275 format64 = arm_s1_regime_using_lpae_format(env, mmu_idx); 2276 2277 if (arm_feature(env, ARM_FEATURE_EL2)) { 2278 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) { 2279 format64 |= env->cp15.hcr_el2 & HCR_VM; 2280 } else { 2281 format64 |= arm_current_el(env) == 2; 2282 } 2283 } 2284 } 2285 2286 if (format64) { 2287 /* Create a 64-bit PAR */ 2288 par64 = (1 << 11); /* LPAE bit always set */ 2289 if (!ret) { 2290 par64 |= phys_addr & ~0xfffULL; 2291 if (!attrs.secure) { 2292 par64 |= (1 << 9); /* NS */ 2293 } 2294 par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */ 2295 par64 |= cacheattrs.shareability << 7; /* SH */ 2296 } else { 2297 uint32_t fsr = arm_fi_to_lfsc(&fi); 2298 2299 par64 |= 1; /* F */ 2300 par64 |= (fsr & 0x3f) << 1; /* FS */ 2301 /* Note that S2WLK and FSTAGE are always zero, because we don't 2302 * implement virtualization and therefore there can't be a stage 2 2303 * fault. 2304 */ 2305 } 2306 } else { 2307 /* fsr is a DFSR/IFSR value for the short descriptor 2308 * translation table format (with WnR always clear). 2309 * Convert it to a 32-bit PAR. 2310 */ 2311 if (!ret) { 2312 /* We do not set any attribute bits in the PAR */ 2313 if (page_size == (1 << 24) 2314 && arm_feature(env, ARM_FEATURE_V7)) { 2315 par64 = (phys_addr & 0xff000000) | (1 << 1); 2316 } else { 2317 par64 = phys_addr & 0xfffff000; 2318 } 2319 if (!attrs.secure) { 2320 par64 |= (1 << 9); /* NS */ 2321 } 2322 } else { 2323 uint32_t fsr = arm_fi_to_sfsc(&fi); 2324 2325 par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) | 2326 ((fsr & 0xf) << 1) | 1; 2327 } 2328 } 2329 return par64; 2330 } 2331 2332 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 2333 { 2334 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 2335 uint64_t par64; 2336 ARMMMUIdx mmu_idx; 2337 int el = arm_current_el(env); 2338 bool secure = arm_is_secure_below_el3(env); 2339 2340 switch (ri->opc2 & 6) { 2341 case 0: 2342 /* stage 1 current state PL1: ATS1CPR, ATS1CPW */ 2343 switch (el) { 2344 case 3: 2345 mmu_idx = ARMMMUIdx_S1E3; 2346 break; 2347 case 2: 2348 mmu_idx = ARMMMUIdx_S1NSE1; 2349 break; 2350 case 1: 2351 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1; 2352 break; 2353 default: 2354 g_assert_not_reached(); 2355 } 2356 break; 2357 case 2: 2358 /* stage 1 current state PL0: ATS1CUR, ATS1CUW */ 2359 switch (el) { 2360 case 3: 2361 mmu_idx = ARMMMUIdx_S1SE0; 2362 break; 2363 case 2: 2364 mmu_idx = ARMMMUIdx_S1NSE0; 2365 break; 2366 case 1: 2367 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0; 2368 break; 2369 default: 2370 g_assert_not_reached(); 2371 } 2372 break; 2373 case 4: 2374 /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */ 2375 mmu_idx = ARMMMUIdx_S12NSE1; 2376 break; 2377 case 6: 2378 /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */ 2379 mmu_idx = ARMMMUIdx_S12NSE0; 2380 break; 2381 default: 2382 g_assert_not_reached(); 2383 } 2384 2385 par64 = do_ats_write(env, value, access_type, mmu_idx); 2386 2387 A32_BANKED_CURRENT_REG_SET(env, par, par64); 2388 } 2389 2390 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri, 2391 uint64_t value) 2392 { 2393 MMUAccessType access_type = ri->opc2 & 1 ? 
MMU_DATA_STORE : MMU_DATA_LOAD; 2394 uint64_t par64; 2395 2396 par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS); 2397 2398 A32_BANKED_CURRENT_REG_SET(env, par, par64); 2399 } 2400 2401 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri, 2402 bool isread) 2403 { 2404 if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) { 2405 return CP_ACCESS_TRAP; 2406 } 2407 return CP_ACCESS_OK; 2408 } 2409 2410 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, 2411 uint64_t value) 2412 { 2413 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 2414 ARMMMUIdx mmu_idx; 2415 int secure = arm_is_secure_below_el3(env); 2416 2417 switch (ri->opc2 & 6) { 2418 case 0: 2419 switch (ri->opc1) { 2420 case 0: /* AT S1E1R, AT S1E1W */ 2421 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1; 2422 break; 2423 case 4: /* AT S1E2R, AT S1E2W */ 2424 mmu_idx = ARMMMUIdx_S1E2; 2425 break; 2426 case 6: /* AT S1E3R, AT S1E3W */ 2427 mmu_idx = ARMMMUIdx_S1E3; 2428 break; 2429 default: 2430 g_assert_not_reached(); 2431 } 2432 break; 2433 case 2: /* AT S1E0R, AT S1E0W */ 2434 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0; 2435 break; 2436 case 4: /* AT S12E1R, AT S12E1W */ 2437 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1; 2438 break; 2439 case 6: /* AT S12E0R, AT S12E0W */ 2440 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0; 2441 break; 2442 default: 2443 g_assert_not_reached(); 2444 } 2445 2446 env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx); 2447 } 2448 #endif 2449 2450 static const ARMCPRegInfo vapa_cp_reginfo[] = { 2451 { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0, 2452 .access = PL1_RW, .resetvalue = 0, 2453 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s), 2454 offsetoflow32(CPUARMState, cp15.par_ns) }, 2455 .writefn = par_write }, 2456 #ifndef CONFIG_USER_ONLY 2457 /* This underdecoding is safe because the reginfo is NO_RAW. */ 2458 { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY, 2459 .access = PL1_W, .accessfn = ats_access, 2460 .writefn = ats_write, .type = ARM_CP_NO_RAW }, 2461 #endif 2462 REGINFO_SENTINEL 2463 }; 2464 2465 /* Return basic MPU access permission bits. */ 2466 static uint32_t simple_mpu_ap_bits(uint32_t val) 2467 { 2468 uint32_t ret; 2469 uint32_t mask; 2470 int i; 2471 ret = 0; 2472 mask = 3; 2473 for (i = 0; i < 16; i += 2) { 2474 ret |= (val >> i) & mask; 2475 mask <<= 2; 2476 } 2477 return ret; 2478 } 2479 2480 /* Pad basic MPU access permission bits to extended format. 
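 *
 * Editor's note (illustrative worked example, not part of the original
 * source): the CPU state always stores the "extended" layout, in which
 * each region gets a 4-bit field (only the low two bits of which are
 * used), while the legacy DATA_AP/INSN_AP registers pack the same
 * information as adjacent 2-bit fields; the two helpers just move field
 * n between bit position 2*n and bit position 4*n.  For instance, with
 * AP = 3 for region 1 only:
 *
 *   simple view:   0x000c  (bits [3:2])
 *   extended view: 0x0030  (bits [5:4])
 *
 *   extended_mpu_ap_bits(0x000c) == 0x0030
 *   simple_mpu_ap_bits(0x0030)   == 0x000c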
*/ 2481 static uint32_t extended_mpu_ap_bits(uint32_t val) 2482 { 2483 uint32_t ret; 2484 uint32_t mask; 2485 int i; 2486 ret = 0; 2487 mask = 3; 2488 for (i = 0; i < 16; i += 2) { 2489 ret |= (val & mask) << i; 2490 mask <<= 2; 2491 } 2492 return ret; 2493 } 2494 2495 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 2496 uint64_t value) 2497 { 2498 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value); 2499 } 2500 2501 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 2502 { 2503 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap); 2504 } 2505 2506 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 2507 uint64_t value) 2508 { 2509 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value); 2510 } 2511 2512 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 2513 { 2514 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap); 2515 } 2516 2517 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri) 2518 { 2519 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 2520 2521 if (!u32p) { 2522 return 0; 2523 } 2524 2525 u32p += env->pmsav7.rnr[M_REG_NS]; 2526 return *u32p; 2527 } 2528 2529 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri, 2530 uint64_t value) 2531 { 2532 ARMCPU *cpu = arm_env_get_cpu(env); 2533 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 2534 2535 if (!u32p) { 2536 return; 2537 } 2538 2539 u32p += env->pmsav7.rnr[M_REG_NS]; 2540 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ 2541 *u32p = value; 2542 } 2543 2544 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2545 uint64_t value) 2546 { 2547 ARMCPU *cpu = arm_env_get_cpu(env); 2548 uint32_t nrgs = cpu->pmsav7_dregion; 2549 2550 if (value >= nrgs) { 2551 qemu_log_mask(LOG_GUEST_ERROR, 2552 "PMSAv7 RGNR write >= # supported regions, %" PRIu32 2553 " > %" PRIu32 "\n", (uint32_t)value, nrgs); 2554 return; 2555 } 2556 2557 raw_write(env, ri, value); 2558 } 2559 2560 static const ARMCPRegInfo pmsav7_cp_reginfo[] = { 2561 /* Reset for all these registers is handled in arm_cpu_reset(), 2562 * because the PMSAv7 is also used by M-profile CPUs, which do 2563 * not register cpregs but still need the state to be reset. 
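 *
 * Editor's note (illustrative aside, not part of the original source):
 * DRBAR/DRSR/DRACR below are windows into per-region arrays selected by
 * RGNR: pmsav7_read()/pmsav7_write() add env->pmsav7.rnr to the array
 * pointer found via .fieldoffset, and pmsav7_rgnr_write() rejects region
 * numbers >= cpu->pmsav7_dregion.  A guest therefore programs region N
 * with a sequence like (sketch only, register choices are arbitrary):
 *
 *   MCR p15, 0, rN, c6, c2, 0   ; RGNR  = N
 *   MCR p15, 0, rB, c6, c1, 0   ; DRBAR = region base
 *   MCR p15, 0, rS, c6, c1, 2   ; DRSR  = size and enable
 *   MCR p15, 0, rA, c6, c1, 4   ; DRACR = access control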
2564 */ 2565 { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0, 2566 .access = PL1_RW, .type = ARM_CP_NO_RAW, 2567 .fieldoffset = offsetof(CPUARMState, pmsav7.drbar), 2568 .readfn = pmsav7_read, .writefn = pmsav7_write, 2569 .resetfn = arm_cp_reset_ignore }, 2570 { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2, 2571 .access = PL1_RW, .type = ARM_CP_NO_RAW, 2572 .fieldoffset = offsetof(CPUARMState, pmsav7.drsr), 2573 .readfn = pmsav7_read, .writefn = pmsav7_write, 2574 .resetfn = arm_cp_reset_ignore }, 2575 { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4, 2576 .access = PL1_RW, .type = ARM_CP_NO_RAW, 2577 .fieldoffset = offsetof(CPUARMState, pmsav7.dracr), 2578 .readfn = pmsav7_read, .writefn = pmsav7_write, 2579 .resetfn = arm_cp_reset_ignore }, 2580 { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0, 2581 .access = PL1_RW, 2582 .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]), 2583 .writefn = pmsav7_rgnr_write, 2584 .resetfn = arm_cp_reset_ignore }, 2585 REGINFO_SENTINEL 2586 }; 2587 2588 static const ARMCPRegInfo pmsav5_cp_reginfo[] = { 2589 { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 2590 .access = PL1_RW, .type = ARM_CP_ALIAS, 2591 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 2592 .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, }, 2593 { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 2594 .access = PL1_RW, .type = ARM_CP_ALIAS, 2595 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 2596 .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, }, 2597 { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2, 2598 .access = PL1_RW, 2599 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 2600 .resetvalue = 0, }, 2601 { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3, 2602 .access = PL1_RW, 2603 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 2604 .resetvalue = 0, }, 2605 { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 2606 .access = PL1_RW, 2607 .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, }, 2608 { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1, 2609 .access = PL1_RW, 2610 .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, }, 2611 /* Protection region base and size registers */ 2612 { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, 2613 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2614 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) }, 2615 { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0, 2616 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2617 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) }, 2618 { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0, 2619 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2620 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) }, 2621 { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0, 2622 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2623 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) }, 2624 { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0, 2625 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2626 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) }, 2627 { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0, 2628 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2629 .fieldoffset 
= offsetof(CPUARMState, cp15.c6_region[5]) }, 2630 { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0, 2631 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2632 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) }, 2633 { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0, 2634 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2635 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) }, 2636 REGINFO_SENTINEL 2637 }; 2638 2639 static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri, 2640 uint64_t value) 2641 { 2642 TCR *tcr = raw_ptr(env, ri); 2643 int maskshift = extract32(value, 0, 3); 2644 2645 if (!arm_feature(env, ARM_FEATURE_V8)) { 2646 if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) { 2647 /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when 2648 * using Long-desciptor translation table format */ 2649 value &= ~((7 << 19) | (3 << 14) | (0xf << 3)); 2650 } else if (arm_feature(env, ARM_FEATURE_EL3)) { 2651 /* In an implementation that includes the Security Extensions 2652 * TTBCR has additional fields PD0 [4] and PD1 [5] for 2653 * Short-descriptor translation table format. 2654 */ 2655 value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N; 2656 } else { 2657 value &= TTBCR_N; 2658 } 2659 } 2660 2661 /* Update the masks corresponding to the TCR bank being written 2662 * Note that we always calculate mask and base_mask, but 2663 * they are only used for short-descriptor tables (ie if EAE is 0); 2664 * for long-descriptor tables the TCR fields are used differently 2665 * and the mask and base_mask values are meaningless. 2666 */ 2667 tcr->raw_tcr = value; 2668 tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift); 2669 tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift); 2670 } 2671 2672 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2673 uint64_t value) 2674 { 2675 ARMCPU *cpu = arm_env_get_cpu(env); 2676 2677 if (arm_feature(env, ARM_FEATURE_LPAE)) { 2678 /* With LPAE the TTBCR could result in a change of ASID 2679 * via the TTBCR.A1 bit, so do a TLB flush. 2680 */ 2681 tlb_flush(CPU(cpu)); 2682 } 2683 vmsa_ttbcr_raw_write(env, ri, value); 2684 } 2685 2686 static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2687 { 2688 TCR *tcr = raw_ptr(env, ri); 2689 2690 /* Reset both the TCR as well as the masks corresponding to the bank of 2691 * the TCR being reset. 2692 */ 2693 tcr->raw_tcr = 0; 2694 tcr->mask = 0; 2695 tcr->base_mask = 0xffffc000u; 2696 } 2697 2698 static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri, 2699 uint64_t value) 2700 { 2701 ARMCPU *cpu = arm_env_get_cpu(env); 2702 TCR *tcr = raw_ptr(env, ri); 2703 2704 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */ 2705 tlb_flush(CPU(cpu)); 2706 tcr->raw_tcr = value; 2707 } 2708 2709 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2710 uint64_t value) 2711 { 2712 /* 64 bit accesses to the TTBRs can change the ASID and so we 2713 * must flush the TLB. 2714 */ 2715 if (cpreg_field_is_64bit(ri)) { 2716 ARMCPU *cpu = arm_env_get_cpu(env); 2717 2718 tlb_flush(CPU(cpu)); 2719 } 2720 raw_write(env, ri, value); 2721 } 2722 2723 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2724 uint64_t value) 2725 { 2726 ARMCPU *cpu = arm_env_get_cpu(env); 2727 CPUState *cs = CPU(cpu); 2728 2729 /* Accesses to VTTBR may change the VMID so we must flush the TLB. 
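 *
 * Editor's note (illustrative worked example, not part of the original
 * source): for the short-descriptor case handled by
 * vmsa_ttbcr_raw_write() above, maskshift is TTBCR.N.  With N = 2:
 *
 *   tcr->mask      = ~(0xffffffffu >> 2) = 0xc0000000
 *   tcr->base_mask = ~(0x3fffu >> 2)     = 0xfffff000
 *
 * i.e. any VA whose top two bits are non-zero is translated via TTBR1,
 * and the TTBR0 table base is aligned to 2^(14 - N) = 4KB.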
*/ 2730 if (raw_read(env, ri) != value) { 2731 tlb_flush_by_mmuidx(cs, 2732 ARMMMUIdxBit_S12NSE1 | 2733 ARMMMUIdxBit_S12NSE0 | 2734 ARMMMUIdxBit_S2NS); 2735 raw_write(env, ri, value); 2736 } 2737 } 2738 2739 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = { 2740 { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 2741 .access = PL1_RW, .type = ARM_CP_ALIAS, 2742 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s), 2743 offsetoflow32(CPUARMState, cp15.dfsr_ns) }, }, 2744 { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 2745 .access = PL1_RW, .resetvalue = 0, 2746 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s), 2747 offsetoflow32(CPUARMState, cp15.ifsr_ns) } }, 2748 { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0, 2749 .access = PL1_RW, .resetvalue = 0, 2750 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s), 2751 offsetof(CPUARMState, cp15.dfar_ns) } }, 2752 { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64, 2753 .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0, 2754 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]), 2755 .resetvalue = 0, }, 2756 REGINFO_SENTINEL 2757 }; 2758 2759 static const ARMCPRegInfo vmsa_cp_reginfo[] = { 2760 { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64, 2761 .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0, 2762 .access = PL1_RW, 2763 .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, }, 2764 { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH, 2765 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0, 2766 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0, 2767 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 2768 offsetof(CPUARMState, cp15.ttbr0_ns) } }, 2769 { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH, 2770 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1, 2771 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0, 2772 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 2773 offsetof(CPUARMState, cp15.ttbr1_ns) } }, 2774 { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64, 2775 .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 2776 .access = PL1_RW, .writefn = vmsa_tcr_el1_write, 2777 .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write, 2778 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) }, 2779 { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 2780 .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write, 2781 .raw_writefn = vmsa_ttbcr_raw_write, 2782 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]), 2783 offsetoflow32(CPUARMState, cp15.tcr_el[1])} }, 2784 REGINFO_SENTINEL 2785 }; 2786 2787 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri, 2788 uint64_t value) 2789 { 2790 env->cp15.c15_ticonfig = value & 0xe7; 2791 /* The OS_TYPE bit in this register changes the reported CPUID! */ 2792 env->cp15.c0_cpuid = (value & (1 << 5)) ? 
2793 ARM_CPUID_TI915T : ARM_CPUID_TI925T; 2794 } 2795 2796 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri, 2797 uint64_t value) 2798 { 2799 env->cp15.c15_threadid = value & 0xffff; 2800 } 2801 2802 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri, 2803 uint64_t value) 2804 { 2805 /* Wait-for-interrupt (deprecated) */ 2806 cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT); 2807 } 2808 2809 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri, 2810 uint64_t value) 2811 { 2812 /* On OMAP there are registers indicating the max/min index of dcache lines 2813 * containing a dirty line; cache flush operations have to reset these. 2814 */ 2815 env->cp15.c15_i_max = 0x000; 2816 env->cp15.c15_i_min = 0xff0; 2817 } 2818 2819 static const ARMCPRegInfo omap_cp_reginfo[] = { 2820 { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY, 2821 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE, 2822 .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]), 2823 .resetvalue = 0, }, 2824 { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0, 2825 .access = PL1_RW, .type = ARM_CP_NOP }, 2826 { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, 2827 .access = PL1_RW, 2828 .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0, 2829 .writefn = omap_ticonfig_write }, 2830 { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0, 2831 .access = PL1_RW, 2832 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, }, 2833 { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0, 2834 .access = PL1_RW, .resetvalue = 0xff0, 2835 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) }, 2836 { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0, 2837 .access = PL1_RW, 2838 .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0, 2839 .writefn = omap_threadid_write }, 2840 { .name = "TI925T_STATUS", .cp = 15, .crn = 15, 2841 .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 2842 .type = ARM_CP_NO_RAW, 2843 .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, }, 2844 /* TODO: Peripheral port remap register: 2845 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller 2846 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff), 2847 * when MMU is off. 
2848 */ 2849 { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, 2850 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, 2851 .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW, 2852 .writefn = omap_cachemaint_write }, 2853 { .name = "C9", .cp = 15, .crn = 9, 2854 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, 2855 .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 }, 2856 REGINFO_SENTINEL 2857 }; 2858 2859 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri, 2860 uint64_t value) 2861 { 2862 env->cp15.c15_cpar = value & 0x3fff; 2863 } 2864 2865 static const ARMCPRegInfo xscale_cp_reginfo[] = { 2866 { .name = "XSCALE_CPAR", 2867 .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 2868 .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0, 2869 .writefn = xscale_cpar_write, }, 2870 { .name = "XSCALE_AUXCR", 2871 .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, 2872 .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr), 2873 .resetvalue = 0, }, 2874 /* XScale specific cache-lockdown: since we have no cache we NOP these 2875 * and hope the guest does not really rely on cache behaviour. 2876 */ 2877 { .name = "XSCALE_LOCK_ICACHE_LINE", 2878 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0, 2879 .access = PL1_W, .type = ARM_CP_NOP }, 2880 { .name = "XSCALE_UNLOCK_ICACHE", 2881 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1, 2882 .access = PL1_W, .type = ARM_CP_NOP }, 2883 { .name = "XSCALE_DCACHE_LOCK", 2884 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0, 2885 .access = PL1_RW, .type = ARM_CP_NOP }, 2886 { .name = "XSCALE_UNLOCK_DCACHE", 2887 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1, 2888 .access = PL1_W, .type = ARM_CP_NOP }, 2889 REGINFO_SENTINEL 2890 }; 2891 2892 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = { 2893 /* RAZ/WI the whole crn=15 space, when we don't have a more specific 2894 * implementation of this implementation-defined space. 2895 * Ideally this should eventually disappear in favour of actually 2896 * implementing the correct behaviour for all cores. 
2897 */ 2898 { .name = "C15_IMPDEF", .cp = 15, .crn = 15, 2899 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, 2900 .access = PL1_RW, 2901 .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE, 2902 .resetvalue = 0 }, 2903 REGINFO_SENTINEL 2904 }; 2905 2906 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = { 2907 /* Cache status: RAZ because we have no cache so it's always clean */ 2908 { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6, 2909 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 2910 .resetvalue = 0 }, 2911 REGINFO_SENTINEL 2912 }; 2913 2914 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = { 2915 /* We never have a a block transfer operation in progress */ 2916 { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4, 2917 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 2918 .resetvalue = 0 }, 2919 /* The cache ops themselves: these all NOP for QEMU */ 2920 { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0, 2921 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 2922 { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0, 2923 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 2924 { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0, 2925 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 2926 { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1, 2927 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 2928 { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2, 2929 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 2930 { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0, 2931 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 2932 REGINFO_SENTINEL 2933 }; 2934 2935 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = { 2936 /* The cache test-and-clean instructions always return (1 << 30) 2937 * to indicate that there are no dirty cache lines. 2938 */ 2939 { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3, 2940 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 2941 .resetvalue = (1 << 30) }, 2942 { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3, 2943 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 2944 .resetvalue = (1 << 30) }, 2945 REGINFO_SENTINEL 2946 }; 2947 2948 static const ARMCPRegInfo strongarm_cp_reginfo[] = { 2949 /* Ignore ReadBuffer accesses */ 2950 { .name = "C9_READBUFFER", .cp = 15, .crn = 9, 2951 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, 2952 .access = PL1_RW, .resetvalue = 0, 2953 .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW }, 2954 REGINFO_SENTINEL 2955 }; 2956 2957 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2958 { 2959 ARMCPU *cpu = arm_env_get_cpu(env); 2960 unsigned int cur_el = arm_current_el(env); 2961 bool secure = arm_is_secure(env); 2962 2963 if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) { 2964 return env->cp15.vpidr_el2; 2965 } 2966 return raw_read(env, ri); 2967 } 2968 2969 static uint64_t mpidr_read_val(CPUARMState *env) 2970 { 2971 ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env)); 2972 uint64_t mpidr = cpu->mp_affinity; 2973 2974 if (arm_feature(env, ARM_FEATURE_V7MP)) { 2975 mpidr |= (1U << 31); 2976 /* Cores which are uniprocessor (non-coherent) 2977 * but still implement the MP extensions set 2978 * bit 30. (For instance, Cortex-R5). 
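 *
 * Editor's note (illustrative worked example, not part of the original
 * source): mp_affinity already holds the Aff3..Aff0 fields, so for a
 * v7MP-class SMP core with affinity 0.0.0.2 the value returned here is
 *
 *   0x00000002 | (1u << 31) = 0x80000002
 *
 * while a uniprocessor implementation of the MP extensions (mp_is_up)
 * additionally sets bit 30.  Note that mpidr_read() below returns
 * VMPIDR_EL2 instead when read from Non-secure EL1 with EL2 present.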
2979 */ 2980 if (cpu->mp_is_up) { 2981 mpidr |= (1u << 30); 2982 } 2983 } 2984 return mpidr; 2985 } 2986 2987 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2988 { 2989 unsigned int cur_el = arm_current_el(env); 2990 bool secure = arm_is_secure(env); 2991 2992 if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) { 2993 return env->cp15.vmpidr_el2; 2994 } 2995 return mpidr_read_val(env); 2996 } 2997 2998 static const ARMCPRegInfo mpidr_cp_reginfo[] = { 2999 { .name = "MPIDR", .state = ARM_CP_STATE_BOTH, 3000 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5, 3001 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW }, 3002 REGINFO_SENTINEL 3003 }; 3004 3005 static const ARMCPRegInfo lpae_cp_reginfo[] = { 3006 /* NOP AMAIR0/1 */ 3007 { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH, 3008 .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0, 3009 .access = PL1_RW, .type = ARM_CP_CONST, 3010 .resetvalue = 0 }, 3011 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */ 3012 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1, 3013 .access = PL1_RW, .type = ARM_CP_CONST, 3014 .resetvalue = 0 }, 3015 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0, 3016 .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0, 3017 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s), 3018 offsetof(CPUARMState, cp15.par_ns)} }, 3019 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0, 3020 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 3021 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 3022 offsetof(CPUARMState, cp15.ttbr0_ns) }, 3023 .writefn = vmsa_ttbr_write, }, 3024 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1, 3025 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 3026 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 3027 offsetof(CPUARMState, cp15.ttbr1_ns) }, 3028 .writefn = vmsa_ttbr_write, }, 3029 REGINFO_SENTINEL 3030 }; 3031 3032 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri) 3033 { 3034 return vfp_get_fpcr(env); 3035 } 3036 3037 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3038 uint64_t value) 3039 { 3040 vfp_set_fpcr(env, value); 3041 } 3042 3043 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri) 3044 { 3045 return vfp_get_fpsr(env); 3046 } 3047 3048 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3049 uint64_t value) 3050 { 3051 vfp_set_fpsr(env, value); 3052 } 3053 3054 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri, 3055 bool isread) 3056 { 3057 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) { 3058 return CP_ACCESS_TRAP; 3059 } 3060 return CP_ACCESS_OK; 3061 } 3062 3063 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri, 3064 uint64_t value) 3065 { 3066 env->daif = value & PSTATE_DAIF; 3067 } 3068 3069 static CPAccessResult aa64_cacheop_access(CPUARMState *env, 3070 const ARMCPRegInfo *ri, 3071 bool isread) 3072 { 3073 /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless 3074 * SCTLR_EL1.UCI is set. 
3075 */ 3076 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) { 3077 return CP_ACCESS_TRAP; 3078 } 3079 return CP_ACCESS_OK; 3080 } 3081 3082 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions 3083 * Page D4-1736 (DDI0487A.b) 3084 */ 3085 3086 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3087 uint64_t value) 3088 { 3089 CPUState *cs = ENV_GET_CPU(env); 3090 3091 if (arm_is_secure_below_el3(env)) { 3092 tlb_flush_by_mmuidx(cs, 3093 ARMMMUIdxBit_S1SE1 | 3094 ARMMMUIdxBit_S1SE0); 3095 } else { 3096 tlb_flush_by_mmuidx(cs, 3097 ARMMMUIdxBit_S12NSE1 | 3098 ARMMMUIdxBit_S12NSE0); 3099 } 3100 } 3101 3102 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3103 uint64_t value) 3104 { 3105 CPUState *cs = ENV_GET_CPU(env); 3106 bool sec = arm_is_secure_below_el3(env); 3107 3108 if (sec) { 3109 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3110 ARMMMUIdxBit_S1SE1 | 3111 ARMMMUIdxBit_S1SE0); 3112 } else { 3113 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3114 ARMMMUIdxBit_S12NSE1 | 3115 ARMMMUIdxBit_S12NSE0); 3116 } 3117 } 3118 3119 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3120 uint64_t value) 3121 { 3122 /* Note that the 'ALL' scope must invalidate both stage 1 and 3123 * stage 2 translations, whereas most other scopes only invalidate 3124 * stage 1 translations. 3125 */ 3126 ARMCPU *cpu = arm_env_get_cpu(env); 3127 CPUState *cs = CPU(cpu); 3128 3129 if (arm_is_secure_below_el3(env)) { 3130 tlb_flush_by_mmuidx(cs, 3131 ARMMMUIdxBit_S1SE1 | 3132 ARMMMUIdxBit_S1SE0); 3133 } else { 3134 if (arm_feature(env, ARM_FEATURE_EL2)) { 3135 tlb_flush_by_mmuidx(cs, 3136 ARMMMUIdxBit_S12NSE1 | 3137 ARMMMUIdxBit_S12NSE0 | 3138 ARMMMUIdxBit_S2NS); 3139 } else { 3140 tlb_flush_by_mmuidx(cs, 3141 ARMMMUIdxBit_S12NSE1 | 3142 ARMMMUIdxBit_S12NSE0); 3143 } 3144 } 3145 } 3146 3147 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri, 3148 uint64_t value) 3149 { 3150 ARMCPU *cpu = arm_env_get_cpu(env); 3151 CPUState *cs = CPU(cpu); 3152 3153 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2); 3154 } 3155 3156 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, 3157 uint64_t value) 3158 { 3159 ARMCPU *cpu = arm_env_get_cpu(env); 3160 CPUState *cs = CPU(cpu); 3161 3162 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3); 3163 } 3164 3165 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3166 uint64_t value) 3167 { 3168 /* Note that the 'ALL' scope must invalidate both stage 1 and 3169 * stage 2 translations, whereas most other scopes only invalidate 3170 * stage 1 translations. 
3171 */ 3172 CPUState *cs = ENV_GET_CPU(env); 3173 bool sec = arm_is_secure_below_el3(env); 3174 bool has_el2 = arm_feature(env, ARM_FEATURE_EL2); 3175 3176 if (sec) { 3177 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3178 ARMMMUIdxBit_S1SE1 | 3179 ARMMMUIdxBit_S1SE0); 3180 } else if (has_el2) { 3181 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3182 ARMMMUIdxBit_S12NSE1 | 3183 ARMMMUIdxBit_S12NSE0 | 3184 ARMMMUIdxBit_S2NS); 3185 } else { 3186 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3187 ARMMMUIdxBit_S12NSE1 | 3188 ARMMMUIdxBit_S12NSE0); 3189 } 3190 } 3191 3192 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3193 uint64_t value) 3194 { 3195 CPUState *cs = ENV_GET_CPU(env); 3196 3197 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2); 3198 } 3199 3200 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3201 uint64_t value) 3202 { 3203 CPUState *cs = ENV_GET_CPU(env); 3204 3205 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3); 3206 } 3207 3208 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3209 uint64_t value) 3210 { 3211 /* Invalidate by VA, EL1&0 (AArch64 version). 3212 * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1, 3213 * since we don't support flush-for-specific-ASID-only or 3214 * flush-last-level-only. 3215 */ 3216 ARMCPU *cpu = arm_env_get_cpu(env); 3217 CPUState *cs = CPU(cpu); 3218 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3219 3220 if (arm_is_secure_below_el3(env)) { 3221 tlb_flush_page_by_mmuidx(cs, pageaddr, 3222 ARMMMUIdxBit_S1SE1 | 3223 ARMMMUIdxBit_S1SE0); 3224 } else { 3225 tlb_flush_page_by_mmuidx(cs, pageaddr, 3226 ARMMMUIdxBit_S12NSE1 | 3227 ARMMMUIdxBit_S12NSE0); 3228 } 3229 } 3230 3231 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, 3232 uint64_t value) 3233 { 3234 /* Invalidate by VA, EL2 3235 * Currently handles both VAE2 and VALE2, since we don't support 3236 * flush-last-level-only. 3237 */ 3238 ARMCPU *cpu = arm_env_get_cpu(env); 3239 CPUState *cs = CPU(cpu); 3240 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3241 3242 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2); 3243 } 3244 3245 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri, 3246 uint64_t value) 3247 { 3248 /* Invalidate by VA, EL3 3249 * Currently handles both VAE3 and VALE3, since we don't support 3250 * flush-last-level-only. 
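 *
 * Editor's note (illustrative worked example, not part of the original
 * source): for all of these by-VA operations the register value carries
 * VA[55:12] in bits [43:0], so
 *
 *   pageaddr = sextract64(value << 12, 0, 56)
 *
 * rebuilds the page-aligned address and sign-extends bit 55 so that
 * kernel-space addresses come out in canonical form.  For example a
 * TLBI with Xt = 0x00000fffffffffff gives pageaddr = 0xfffffffffffff000.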
3251 */ 3252 ARMCPU *cpu = arm_env_get_cpu(env); 3253 CPUState *cs = CPU(cpu); 3254 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3255 3256 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3); 3257 } 3258 3259 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3260 uint64_t value) 3261 { 3262 ARMCPU *cpu = arm_env_get_cpu(env); 3263 CPUState *cs = CPU(cpu); 3264 bool sec = arm_is_secure_below_el3(env); 3265 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3266 3267 if (sec) { 3268 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3269 ARMMMUIdxBit_S1SE1 | 3270 ARMMMUIdxBit_S1SE0); 3271 } else { 3272 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3273 ARMMMUIdxBit_S12NSE1 | 3274 ARMMMUIdxBit_S12NSE0); 3275 } 3276 } 3277 3278 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3279 uint64_t value) 3280 { 3281 CPUState *cs = ENV_GET_CPU(env); 3282 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3283 3284 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3285 ARMMMUIdxBit_S1E2); 3286 } 3287 3288 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3289 uint64_t value) 3290 { 3291 CPUState *cs = ENV_GET_CPU(env); 3292 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3293 3294 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3295 ARMMMUIdxBit_S1E3); 3296 } 3297 3298 static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3299 uint64_t value) 3300 { 3301 /* Invalidate by IPA. This has to invalidate any structures that 3302 * contain only stage 2 translation information, but does not need 3303 * to apply to structures that contain combined stage 1 and stage 2 3304 * translation information. 3305 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero. 3306 */ 3307 ARMCPU *cpu = arm_env_get_cpu(env); 3308 CPUState *cs = CPU(cpu); 3309 uint64_t pageaddr; 3310 3311 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 3312 return; 3313 } 3314 3315 pageaddr = sextract64(value << 12, 0, 48); 3316 3317 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS); 3318 } 3319 3320 static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3321 uint64_t value) 3322 { 3323 CPUState *cs = ENV_GET_CPU(env); 3324 uint64_t pageaddr; 3325 3326 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 3327 return; 3328 } 3329 3330 pageaddr = sextract64(value << 12, 0, 48); 3331 3332 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3333 ARMMMUIdxBit_S2NS); 3334 } 3335 3336 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri, 3337 bool isread) 3338 { 3339 /* We don't implement EL2, so the only control on DC ZVA is the 3340 * bit in the SCTLR which can prohibit access for EL0. 
3341 */ 3342 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) { 3343 return CP_ACCESS_TRAP; 3344 } 3345 return CP_ACCESS_OK; 3346 } 3347 3348 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri) 3349 { 3350 ARMCPU *cpu = arm_env_get_cpu(env); 3351 int dzp_bit = 1 << 4; 3352 3353 /* DZP indicates whether DC ZVA access is allowed */ 3354 if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) { 3355 dzp_bit = 0; 3356 } 3357 return cpu->dcz_blocksize | dzp_bit; 3358 } 3359 3360 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, 3361 bool isread) 3362 { 3363 if (!(env->pstate & PSTATE_SP)) { 3364 /* Access to SP_EL0 is undefined if it's being used as 3365 * the stack pointer. 3366 */ 3367 return CP_ACCESS_TRAP_UNCATEGORIZED; 3368 } 3369 return CP_ACCESS_OK; 3370 } 3371 3372 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri) 3373 { 3374 return env->pstate & PSTATE_SP; 3375 } 3376 3377 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val) 3378 { 3379 update_spsel(env, val); 3380 } 3381 3382 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3383 uint64_t value) 3384 { 3385 ARMCPU *cpu = arm_env_get_cpu(env); 3386 3387 if (raw_read(env, ri) == value) { 3388 /* Skip the TLB flush if nothing actually changed; Linux likes 3389 * to do a lot of pointless SCTLR writes. 3390 */ 3391 return; 3392 } 3393 3394 if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) { 3395 /* M bit is RAZ/WI for PMSA with no MPU implemented */ 3396 value &= ~SCTLR_M; 3397 } 3398 3399 raw_write(env, ri, value); 3400 /* ??? Lots of these bits are not implemented. */ 3401 /* This may enable/disable the MMU, so do a TLB flush. */ 3402 tlb_flush(CPU(cpu)); 3403 } 3404 3405 static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri, 3406 bool isread) 3407 { 3408 if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) { 3409 return CP_ACCESS_TRAP_FP_EL2; 3410 } 3411 if (env->cp15.cptr_el[3] & CPTR_TFP) { 3412 return CP_ACCESS_TRAP_FP_EL3; 3413 } 3414 return CP_ACCESS_OK; 3415 } 3416 3417 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3418 uint64_t value) 3419 { 3420 env->cp15.mdcr_el3 = value & SDCR_VALID_MASK; 3421 } 3422 3423 static const ARMCPRegInfo v8_cp_reginfo[] = { 3424 /* Minimal set of EL0-visible registers. This will need to be expanded 3425 * significantly for system emulation of AArch64 CPUs. 
3426 */ 3427 { .name = "NZCV", .state = ARM_CP_STATE_AA64, 3428 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2, 3429 .access = PL0_RW, .type = ARM_CP_NZCV }, 3430 { .name = "DAIF", .state = ARM_CP_STATE_AA64, 3431 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2, 3432 .type = ARM_CP_NO_RAW, 3433 .access = PL0_RW, .accessfn = aa64_daif_access, 3434 .fieldoffset = offsetof(CPUARMState, daif), 3435 .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore }, 3436 { .name = "FPCR", .state = ARM_CP_STATE_AA64, 3437 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4, 3438 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, 3439 .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write }, 3440 { .name = "FPSR", .state = ARM_CP_STATE_AA64, 3441 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4, 3442 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, 3443 .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write }, 3444 { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64, 3445 .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0, 3446 .access = PL0_R, .type = ARM_CP_NO_RAW, 3447 .readfn = aa64_dczid_read }, 3448 { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64, 3449 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1, 3450 .access = PL0_W, .type = ARM_CP_DC_ZVA, 3451 #ifndef CONFIG_USER_ONLY 3452 /* Avoid overhead of an access check that always passes in user-mode */ 3453 .accessfn = aa64_zva_access, 3454 #endif 3455 }, 3456 { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64, 3457 .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2, 3458 .access = PL1_R, .type = ARM_CP_CURRENTEL }, 3459 /* Cache ops: all NOPs since we don't emulate caches */ 3460 { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64, 3461 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 3462 .access = PL1_W, .type = ARM_CP_NOP }, 3463 { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64, 3464 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 3465 .access = PL1_W, .type = ARM_CP_NOP }, 3466 { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64, 3467 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1, 3468 .access = PL0_W, .type = ARM_CP_NOP, 3469 .accessfn = aa64_cacheop_access }, 3470 { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64, 3471 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 3472 .access = PL1_W, .type = ARM_CP_NOP }, 3473 { .name = "DC_ISW", .state = ARM_CP_STATE_AA64, 3474 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 3475 .access = PL1_W, .type = ARM_CP_NOP }, 3476 { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64, 3477 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1, 3478 .access = PL0_W, .type = ARM_CP_NOP, 3479 .accessfn = aa64_cacheop_access }, 3480 { .name = "DC_CSW", .state = ARM_CP_STATE_AA64, 3481 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, 3482 .access = PL1_W, .type = ARM_CP_NOP }, 3483 { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64, 3484 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1, 3485 .access = PL0_W, .type = ARM_CP_NOP, 3486 .accessfn = aa64_cacheop_access }, 3487 { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64, 3488 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1, 3489 .access = PL0_W, .type = ARM_CP_NOP, 3490 .accessfn = aa64_cacheop_access }, 3491 { .name = "DC_CISW", .state = ARM_CP_STATE_AA64, 3492 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 3493 .access = PL1_W, .type = ARM_CP_NOP }, 3494 /* TLBI operations */ 3495 { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64, 3496 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, 3497 
.access = PL1_W, .type = ARM_CP_NO_RAW, 3498 .writefn = tlbi_aa64_vmalle1is_write }, 3499 { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64, 3500 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, 3501 .access = PL1_W, .type = ARM_CP_NO_RAW, 3502 .writefn = tlbi_aa64_vae1is_write }, 3503 { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64, 3504 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, 3505 .access = PL1_W, .type = ARM_CP_NO_RAW, 3506 .writefn = tlbi_aa64_vmalle1is_write }, 3507 { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64, 3508 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, 3509 .access = PL1_W, .type = ARM_CP_NO_RAW, 3510 .writefn = tlbi_aa64_vae1is_write }, 3511 { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64, 3512 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, 3513 .access = PL1_W, .type = ARM_CP_NO_RAW, 3514 .writefn = tlbi_aa64_vae1is_write }, 3515 { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64, 3516 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, 3517 .access = PL1_W, .type = ARM_CP_NO_RAW, 3518 .writefn = tlbi_aa64_vae1is_write }, 3519 { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64, 3520 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, 3521 .access = PL1_W, .type = ARM_CP_NO_RAW, 3522 .writefn = tlbi_aa64_vmalle1_write }, 3523 { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64, 3524 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, 3525 .access = PL1_W, .type = ARM_CP_NO_RAW, 3526 .writefn = tlbi_aa64_vae1_write }, 3527 { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64, 3528 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, 3529 .access = PL1_W, .type = ARM_CP_NO_RAW, 3530 .writefn = tlbi_aa64_vmalle1_write }, 3531 { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64, 3532 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, 3533 .access = PL1_W, .type = ARM_CP_NO_RAW, 3534 .writefn = tlbi_aa64_vae1_write }, 3535 { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64, 3536 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, 3537 .access = PL1_W, .type = ARM_CP_NO_RAW, 3538 .writefn = tlbi_aa64_vae1_write }, 3539 { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64, 3540 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, 3541 .access = PL1_W, .type = ARM_CP_NO_RAW, 3542 .writefn = tlbi_aa64_vae1_write }, 3543 { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64, 3544 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, 3545 .access = PL2_W, .type = ARM_CP_NO_RAW, 3546 .writefn = tlbi_aa64_ipas2e1is_write }, 3547 { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64, 3548 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, 3549 .access = PL2_W, .type = ARM_CP_NO_RAW, 3550 .writefn = tlbi_aa64_ipas2e1is_write }, 3551 { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64, 3552 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, 3553 .access = PL2_W, .type = ARM_CP_NO_RAW, 3554 .writefn = tlbi_aa64_alle1is_write }, 3555 { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64, 3556 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6, 3557 .access = PL2_W, .type = ARM_CP_NO_RAW, 3558 .writefn = tlbi_aa64_alle1is_write }, 3559 { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64, 3560 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, 3561 .access = PL2_W, .type = ARM_CP_NO_RAW, 3562 .writefn = tlbi_aa64_ipas2e1_write }, 3563 { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64, 3564 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, 3565 .access = PL2_W, .type = ARM_CP_NO_RAW, 3566 .writefn = 
tlbi_aa64_ipas2e1_write }, 3567 { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64, 3568 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, 3569 .access = PL2_W, .type = ARM_CP_NO_RAW, 3570 .writefn = tlbi_aa64_alle1_write }, 3571 { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64, 3572 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6, 3573 .access = PL2_W, .type = ARM_CP_NO_RAW, 3574 .writefn = tlbi_aa64_alle1is_write }, 3575 #ifndef CONFIG_USER_ONLY 3576 /* 64 bit address translation operations */ 3577 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, 3578 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0, 3579 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3580 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, 3581 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1, 3582 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3583 { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64, 3584 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2, 3585 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3586 { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64, 3587 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3, 3588 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3589 { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64, 3590 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4, 3591 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3592 { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64, 3593 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5, 3594 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3595 { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64, 3596 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6, 3597 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3598 { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64, 3599 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7, 3600 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3601 /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */ 3602 { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64, 3603 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0, 3604 .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3605 { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64, 3606 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1, 3607 .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3608 { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64, 3609 .type = ARM_CP_ALIAS, 3610 .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0, 3611 .access = PL1_RW, .resetvalue = 0, 3612 .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]), 3613 .writefn = par_write }, 3614 #endif 3615 /* TLB invalidate last level of translation table walk */ 3616 { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, 3617 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write }, 3618 { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, 3619 .type = ARM_CP_NO_RAW, .access = PL1_W, 3620 .writefn = tlbimvaa_is_write }, 3621 { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, 3622 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 3623 { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, 3624 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write }, 3625 { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, 3626 .type = ARM_CP_NO_RAW, .access = PL2_W, 3627 .writefn = 
tlbimva_hyp_write }, 3628 { .name = "TLBIMVALHIS", 3629 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, 3630 .type = ARM_CP_NO_RAW, .access = PL2_W, 3631 .writefn = tlbimva_hyp_is_write }, 3632 { .name = "TLBIIPAS2", 3633 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, 3634 .type = ARM_CP_NO_RAW, .access = PL2_W, 3635 .writefn = tlbiipas2_write }, 3636 { .name = "TLBIIPAS2IS", 3637 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, 3638 .type = ARM_CP_NO_RAW, .access = PL2_W, 3639 .writefn = tlbiipas2_is_write }, 3640 { .name = "TLBIIPAS2L", 3641 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, 3642 .type = ARM_CP_NO_RAW, .access = PL2_W, 3643 .writefn = tlbiipas2_write }, 3644 { .name = "TLBIIPAS2LIS", 3645 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, 3646 .type = ARM_CP_NO_RAW, .access = PL2_W, 3647 .writefn = tlbiipas2_is_write }, 3648 /* 32 bit cache operations */ 3649 { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 3650 .type = ARM_CP_NOP, .access = PL1_W }, 3651 { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6, 3652 .type = ARM_CP_NOP, .access = PL1_W }, 3653 { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 3654 .type = ARM_CP_NOP, .access = PL1_W }, 3655 { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1, 3656 .type = ARM_CP_NOP, .access = PL1_W }, 3657 { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6, 3658 .type = ARM_CP_NOP, .access = PL1_W }, 3659 { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7, 3660 .type = ARM_CP_NOP, .access = PL1_W }, 3661 { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 3662 .type = ARM_CP_NOP, .access = PL1_W }, 3663 { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 3664 .type = ARM_CP_NOP, .access = PL1_W }, 3665 { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1, 3666 .type = ARM_CP_NOP, .access = PL1_W }, 3667 { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, 3668 .type = ARM_CP_NOP, .access = PL1_W }, 3669 { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1, 3670 .type = ARM_CP_NOP, .access = PL1_W }, 3671 { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1, 3672 .type = ARM_CP_NOP, .access = PL1_W }, 3673 { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 3674 .type = ARM_CP_NOP, .access = PL1_W }, 3675 /* MMU Domain access control / MPU write buffer control */ 3676 { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0, 3677 .access = PL1_RW, .resetvalue = 0, 3678 .writefn = dacr_write, .raw_writefn = raw_write, 3679 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), 3680 offsetoflow32(CPUARMState, cp15.dacr_ns) } }, 3681 { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64, 3682 .type = ARM_CP_ALIAS, 3683 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1, 3684 .access = PL1_RW, 3685 .fieldoffset = offsetof(CPUARMState, elr_el[1]) }, 3686 { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64, 3687 .type = ARM_CP_ALIAS, 3688 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0, 3689 .access = PL1_RW, 3690 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) }, 3691 /* We rely on the access checks not allowing the guest to write to the 3692 * state field when SPSel indicates that it's being used as the stack 3693 * pointer. 
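* (sp_el0_access() above makes the access UNDEF while PSTATE.SP is 0, i.e. while SP_EL0 is the stack pointer currently in use.)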
3694 */ 3695 { .name = "SP_EL0", .state = ARM_CP_STATE_AA64, 3696 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0, 3697 .access = PL1_RW, .accessfn = sp_el0_access, 3698 .type = ARM_CP_ALIAS, 3699 .fieldoffset = offsetof(CPUARMState, sp_el[0]) }, 3700 { .name = "SP_EL1", .state = ARM_CP_STATE_AA64, 3701 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0, 3702 .access = PL2_RW, .type = ARM_CP_ALIAS, 3703 .fieldoffset = offsetof(CPUARMState, sp_el[1]) }, 3704 { .name = "SPSel", .state = ARM_CP_STATE_AA64, 3705 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0, 3706 .type = ARM_CP_NO_RAW, 3707 .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write }, 3708 { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64, 3709 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0, 3710 .type = ARM_CP_ALIAS, 3711 .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]), 3712 .access = PL2_RW, .accessfn = fpexc32_access }, 3713 { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64, 3714 .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0, 3715 .access = PL2_RW, .resetvalue = 0, 3716 .writefn = dacr_write, .raw_writefn = raw_write, 3717 .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) }, 3718 { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64, 3719 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1, 3720 .access = PL2_RW, .resetvalue = 0, 3721 .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) }, 3722 { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64, 3723 .type = ARM_CP_ALIAS, 3724 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0, 3725 .access = PL2_RW, 3726 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) }, 3727 { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64, 3728 .type = ARM_CP_ALIAS, 3729 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1, 3730 .access = PL2_RW, 3731 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) }, 3732 { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64, 3733 .type = ARM_CP_ALIAS, 3734 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2, 3735 .access = PL2_RW, 3736 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) }, 3737 { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64, 3738 .type = ARM_CP_ALIAS, 3739 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3, 3740 .access = PL2_RW, 3741 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) }, 3742 { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64, 3743 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1, 3744 .resetvalue = 0, 3745 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) }, 3746 { .name = "SDCR", .type = ARM_CP_ALIAS, 3747 .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1, 3748 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 3749 .writefn = sdcr_write, 3750 .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) }, 3751 REGINFO_SENTINEL 3752 }; 3753 3754 /* Used to describe the behaviour of EL2 regs when EL2 does not exist. 
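* They read as zero and ignore writes, so software running at EL3 can touch the EL2 register file without faulting even though EL2 is absent.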
*/ 3755 static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = { 3756 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, 3757 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, 3758 .access = PL2_RW, 3759 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore }, 3760 { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH, 3762 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 3763 .access = PL2_RW, 3764 .type = ARM_CP_CONST, .resetvalue = 0 }, 3765 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, 3766 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, 3767 .access = PL2_RW, 3768 .type = ARM_CP_CONST, .resetvalue = 0 }, 3769 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, 3770 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, 3771 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3772 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, 3773 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, 3774 .access = PL2_RW, .type = ARM_CP_CONST, 3775 .resetvalue = 0 }, 3776 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, 3777 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, 3778 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3779 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, 3780 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, 3781 .access = PL2_RW, .type = ARM_CP_CONST, 3782 .resetvalue = 0 }, 3783 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, 3784 .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, 3785 .access = PL2_RW, .type = ARM_CP_CONST, 3786 .resetvalue = 0 }, 3787 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, 3788 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, 3789 .access = PL2_RW, .type = ARM_CP_CONST, 3790 .resetvalue = 0 }, 3791 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, 3792 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, 3793 .access = PL2_RW, .type = ARM_CP_CONST, 3794 .resetvalue = 0 }, 3795 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, 3796 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, 3797 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3798 { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH, 3799 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 3800 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 3801 .type = ARM_CP_CONST, .resetvalue = 0 }, 3802 { .name = "VTTBR", .state = ARM_CP_STATE_AA32, 3803 .cp = 15, .opc1 = 6, .crm = 2, 3804 .access = PL2_RW, .accessfn = access_el3_aa32ns, 3805 .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, 3806 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, 3807 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, 3808 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3809 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, 3810 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, 3811 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3812 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, 3813 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, 3814 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3815 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, 3816 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 3817 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3818 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, 3819 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 3820 .resetvalue = 0 }, 3821 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, 3822 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, 3823 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
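/* The remaining EL2 registers (counter offset, hyp timer, MDCR_EL2 and the fault address registers) are likewise constant zero here */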
3824 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, 3825 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, 3826 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3827 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, 3828 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 3829 .resetvalue = 0 }, 3830 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, 3831 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, 3832 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3833 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, 3834 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 3835 .resetvalue = 0 }, 3836 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 3837 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, 3838 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3839 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, 3840 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, 3841 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3842 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, 3843 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, 3844 .access = PL2_RW, .accessfn = access_tda, 3845 .type = ARM_CP_CONST, .resetvalue = 0 }, 3846 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH, 3847 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 3848 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 3849 .type = ARM_CP_CONST, .resetvalue = 0 }, 3850 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, 3851 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, 3852 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3853 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, 3854 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, 3855 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3856 { .name = "HIFAR", .state = ARM_CP_STATE_AA32, 3857 .type = ARM_CP_CONST, 3858 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, 3859 .access = PL2_RW, .resetvalue = 0 }, 3860 REGINFO_SENTINEL 3861 }; 3862 3863 /* Ditto, but for registers which exist in ARMv8 but not v7 */ 3864 static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = { 3865 { .name = "HCR2", .state = ARM_CP_STATE_AA32, 3866 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, 3867 .access = PL2_RW, 3868 .type = ARM_CP_CONST, .resetvalue = 0 }, 3869 REGINFO_SENTINEL 3870 }; 3871 3872 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 3873 { 3874 ARMCPU *cpu = arm_env_get_cpu(env); 3875 uint64_t valid_mask = HCR_MASK; 3876 3877 if (arm_feature(env, ARM_FEATURE_EL3)) { 3878 valid_mask &= ~HCR_HCD; 3879 } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { 3880 /* Architecturally HCR.TSC is RES0 if EL3 is not implemented. 3881 * However, if we're using the SMC PSCI conduit then QEMU is 3882 * effectively acting like EL3 firmware and so the guest at 3883 * EL2 should retain the ability to prevent EL1 from being 3884 * able to make SMC calls into the ersatz firmware, so in 3885 * that case HCR.TSC should be read/write. 3886 */ 3887 valid_mask &= ~HCR_TSC; 3888 } 3889 3890 /* Clear RES0 bits. 
*/ 3891 value &= valid_mask; 3892 3893 /* These bits change the MMU setup: 3894 * HCR_VM enables stage 2 translation 3895 * HCR_PTW forbids certain page-table setups 3896 * HCR_DC Disables stage1 and enables stage2 translation 3897 */ 3898 if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) { 3899 tlb_flush(CPU(cpu)); 3900 } 3901 env->cp15.hcr_el2 = value; 3902 } 3903 3904 static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri, 3905 uint64_t value) 3906 { 3907 /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */ 3908 value = deposit64(env->cp15.hcr_el2, 32, 32, value); 3909 hcr_write(env, NULL, value); 3910 } 3911 3912 static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri, 3913 uint64_t value) 3914 { 3915 /* Handle HCR write, i.e. write to low half of HCR_EL2 */ 3916 value = deposit64(env->cp15.hcr_el2, 0, 32, value); 3917 hcr_write(env, NULL, value); 3918 } 3919 3920 static const ARMCPRegInfo el2_cp_reginfo[] = { 3921 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64, 3922 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 3923 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), 3924 .writefn = hcr_write }, 3925 { .name = "HCR", .state = ARM_CP_STATE_AA32, 3926 .type = ARM_CP_ALIAS, 3927 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 3928 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), 3929 .writefn = hcr_writelow }, 3930 { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64, 3931 .type = ARM_CP_ALIAS, 3932 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1, 3933 .access = PL2_RW, 3934 .fieldoffset = offsetof(CPUARMState, elr_el[2]) }, 3935 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, 3936 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, 3937 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) }, 3938 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, 3939 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, 3940 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) }, 3941 { .name = "HIFAR", .state = ARM_CP_STATE_AA32, 3942 .type = ARM_CP_ALIAS, 3943 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, 3944 .access = PL2_RW, 3945 .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) }, 3946 { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64, 3947 .type = ARM_CP_ALIAS, 3948 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0, 3949 .access = PL2_RW, 3950 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) }, 3951 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, 3952 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, 3953 .access = PL2_RW, .writefn = vbar_write, 3954 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]), 3955 .resetvalue = 0 }, 3956 { .name = "SP_EL2", .state = ARM_CP_STATE_AA64, 3957 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0, 3958 .access = PL3_RW, .type = ARM_CP_ALIAS, 3959 .fieldoffset = offsetof(CPUARMState, sp_el[2]) }, 3960 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, 3961 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, 3962 .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0, 3963 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) }, 3964 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, 3965 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, 3966 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]), 3967 .resetvalue = 0 }, 3968 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, 3969 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, 3970 .access = PL2_RW, .type = ARM_CP_ALIAS, 
3971 .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) }, 3972 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, 3973 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, 3974 .access = PL2_RW, .type = ARM_CP_CONST, 3975 .resetvalue = 0 }, 3976 /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */ 3977 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, 3978 .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, 3979 .access = PL2_RW, .type = ARM_CP_CONST, 3980 .resetvalue = 0 }, 3981 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, 3982 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, 3983 .access = PL2_RW, .type = ARM_CP_CONST, 3984 .resetvalue = 0 }, 3985 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, 3986 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, 3987 .access = PL2_RW, .type = ARM_CP_CONST, 3988 .resetvalue = 0 }, 3989 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, 3990 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, 3991 .access = PL2_RW, 3992 /* no .writefn needed as this can't cause an ASID change; 3993 * no .raw_writefn or .resetfn needed as we never use mask/base_mask 3994 */ 3995 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) }, 3996 { .name = "VTCR", .state = ARM_CP_STATE_AA32, 3997 .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 3998 .type = ARM_CP_ALIAS, 3999 .access = PL2_RW, .accessfn = access_el3_aa32ns, 4000 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 4001 { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64, 4002 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 4003 .access = PL2_RW, 4004 /* no .writefn needed as this can't cause an ASID change; 4005 * no .raw_writefn or .resetfn needed as we never use mask/base_mask 4006 */ 4007 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 4008 { .name = "VTTBR", .state = ARM_CP_STATE_AA32, 4009 .cp = 15, .opc1 = 6, .crm = 2, 4010 .type = ARM_CP_64BIT | ARM_CP_ALIAS, 4011 .access = PL2_RW, .accessfn = access_el3_aa32ns, 4012 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2), 4013 .writefn = vttbr_write }, 4014 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, 4015 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, 4016 .access = PL2_RW, .writefn = vttbr_write, 4017 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) }, 4018 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, 4019 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, 4020 .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write, 4021 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) }, 4022 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, 4023 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, 4024 .access = PL2_RW, .resetvalue = 0, 4025 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) }, 4026 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, 4027 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 4028 .access = PL2_RW, .resetvalue = 0, 4029 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 4030 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, 4031 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 4032 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 4033 { .name = "TLBIALLNSNH", 4034 .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, 4035 .type = ARM_CP_NO_RAW, .access = PL2_W, 4036 .writefn = tlbiall_nsnh_write }, 4037 { .name = "TLBIALLNSNHIS", 4038 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, 4039 .type = ARM_CP_NO_RAW, .access = PL2_W, 4040 .writefn = tlbiall_nsnh_is_write }, 4041 { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 
= 0, 4042 .type = ARM_CP_NO_RAW, .access = PL2_W, 4043 .writefn = tlbiall_hyp_write }, 4044 { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, 4045 .type = ARM_CP_NO_RAW, .access = PL2_W, 4046 .writefn = tlbiall_hyp_is_write }, 4047 { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, 4048 .type = ARM_CP_NO_RAW, .access = PL2_W, 4049 .writefn = tlbimva_hyp_write }, 4050 { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, 4051 .type = ARM_CP_NO_RAW, .access = PL2_W, 4052 .writefn = tlbimva_hyp_is_write }, 4053 { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64, 4054 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, 4055 .type = ARM_CP_NO_RAW, .access = PL2_W, 4056 .writefn = tlbi_aa64_alle2_write }, 4057 { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64, 4058 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, 4059 .type = ARM_CP_NO_RAW, .access = PL2_W, 4060 .writefn = tlbi_aa64_vae2_write }, 4061 { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64, 4062 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, 4063 .access = PL2_W, .type = ARM_CP_NO_RAW, 4064 .writefn = tlbi_aa64_vae2_write }, 4065 { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64, 4066 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, 4067 .access = PL2_W, .type = ARM_CP_NO_RAW, 4068 .writefn = tlbi_aa64_alle2is_write }, 4069 { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64, 4070 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, 4071 .type = ARM_CP_NO_RAW, .access = PL2_W, 4072 .writefn = tlbi_aa64_vae2is_write }, 4073 { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64, 4074 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, 4075 .access = PL2_W, .type = ARM_CP_NO_RAW, 4076 .writefn = tlbi_aa64_vae2is_write }, 4077 #ifndef CONFIG_USER_ONLY 4078 /* Unlike the other EL2-related AT operations, these must 4079 * UNDEF from EL3 if EL2 is not implemented, which is why we 4080 * define them here rather than with the rest of the AT ops. 4081 */ 4082 { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64, 4083 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 4084 .access = PL2_W, .accessfn = at_s1e2_access, 4085 .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4086 { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64, 4087 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 4088 .access = PL2_W, .accessfn = at_s1e2_access, 4089 .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4090 /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE 4091 * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3 4092 * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose 4093 * to behave as if SCR.NS was 1. 4094 */ 4095 { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 4096 .access = PL2_W, 4097 .writefn = ats1h_write, .type = ARM_CP_NO_RAW }, 4098 { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 4099 .access = PL2_W, 4100 .writefn = ats1h_write, .type = ARM_CP_NO_RAW }, 4101 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, 4102 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, 4103 /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the 4104 * reset values as IMPDEF. We choose to reset to 3 to comply with 4105 * both ARMv7 and ARMv8. 
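* (Bits [1:0] are EL1PCTEN and EL1PCEN, so resetting to 3 leaves EL1 access to the physical counter and timer enabled.)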
4106 */ 4107 .access = PL2_RW, .resetvalue = 3, 4108 .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) }, 4109 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, 4110 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, 4111 .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0, 4112 .writefn = gt_cntvoff_write, 4113 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, 4114 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, 4115 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO, 4116 .writefn = gt_cntvoff_write, 4117 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, 4118 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, 4119 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, 4120 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), 4121 .type = ARM_CP_IO, .access = PL2_RW, 4122 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, 4123 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, 4124 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), 4125 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO, 4126 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, 4127 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 4128 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, 4129 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, 4130 .resetfn = gt_hyp_timer_reset, 4131 .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write }, 4132 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, 4133 .type = ARM_CP_IO, 4134 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, 4135 .access = PL2_RW, 4136 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl), 4137 .resetvalue = 0, 4138 .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write }, 4139 #endif 4140 /* The only field of MDCR_EL2 that has a defined architectural reset value 4141 * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we 4142 * don't implement any PMU event counters, so using zero as a reset 4143 * value for MDCR_EL2 is okay 4144 */ 4145 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, 4146 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, 4147 .access = PL2_RW, .resetvalue = 0, 4148 .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), }, 4149 { .name = "HPFAR", .state = ARM_CP_STATE_AA32, 4150 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 4151 .access = PL2_RW, .accessfn = access_el3_aa32ns, 4152 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, 4153 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64, 4154 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 4155 .access = PL2_RW, 4156 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, 4157 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, 4158 .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, 4159 .access = PL2_RW, 4160 .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) }, 4161 REGINFO_SENTINEL 4162 }; 4163 4164 static const ARMCPRegInfo el2_v8_cp_reginfo[] = { 4165 { .name = "HCR2", .state = ARM_CP_STATE_AA32, 4166 .type = ARM_CP_ALIAS, 4167 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, 4168 .access = PL2_RW, 4169 .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2), 4170 .writefn = hcr_writehigh }, 4171 REGINFO_SENTINEL 4172 }; 4173 4174 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri, 4175 bool isread) 4176 { 4177 /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2. 4178 * At Secure EL1 it traps to EL3.
4179 */ 4180 if (arm_current_el(env) == 3) { 4181 return CP_ACCESS_OK; 4182 } 4183 if (arm_is_secure_below_el3(env)) { 4184 return CP_ACCESS_TRAP_EL3; 4185 } 4186 /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */ 4187 if (isread) { 4188 return CP_ACCESS_OK; 4189 } 4190 return CP_ACCESS_TRAP_UNCATEGORIZED; 4191 } 4192 4193 static const ARMCPRegInfo el3_cp_reginfo[] = { 4194 { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64, 4195 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0, 4196 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3), 4197 .resetvalue = 0, .writefn = scr_write }, 4198 { .name = "SCR", .type = ARM_CP_ALIAS, 4199 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0, 4200 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 4201 .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3), 4202 .writefn = scr_write }, 4203 { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64, 4204 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1, 4205 .access = PL3_RW, .resetvalue = 0, 4206 .fieldoffset = offsetof(CPUARMState, cp15.sder) }, 4207 { .name = "SDER", 4208 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1, 4209 .access = PL3_RW, .resetvalue = 0, 4210 .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) }, 4211 { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 4212 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 4213 .writefn = vbar_write, .resetvalue = 0, 4214 .fieldoffset = offsetof(CPUARMState, cp15.mvbar) }, 4215 { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64, 4216 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0, 4217 .access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0, 4218 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) }, 4219 { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64, 4220 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2, 4221 .access = PL3_RW, 4222 /* no .writefn needed as this can't cause an ASID change; 4223 * we must provide a .raw_writefn and .resetfn because we handle 4224 * reset and migration for the AArch32 TTBCR(S), which might be 4225 * using mask and base_mask. 
4226 */ 4227 .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write, 4228 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) }, 4229 { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64, 4230 .type = ARM_CP_ALIAS, 4231 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1, 4232 .access = PL3_RW, 4233 .fieldoffset = offsetof(CPUARMState, elr_el[3]) }, 4234 { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64, 4235 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0, 4236 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) }, 4237 { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64, 4238 .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0, 4239 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) }, 4240 { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64, 4241 .type = ARM_CP_ALIAS, 4242 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0, 4243 .access = PL3_RW, 4244 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) }, 4245 { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64, 4246 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0, 4247 .access = PL3_RW, .writefn = vbar_write, 4248 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]), 4249 .resetvalue = 0 }, 4250 { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64, 4251 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2, 4252 .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0, 4253 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) }, 4254 { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64, 4255 .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2, 4256 .access = PL3_RW, .resetvalue = 0, 4257 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) }, 4258 { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64, 4259 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0, 4260 .access = PL3_RW, .type = ARM_CP_CONST, 4261 .resetvalue = 0 }, 4262 { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH, 4263 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0, 4264 .access = PL3_RW, .type = ARM_CP_CONST, 4265 .resetvalue = 0 }, 4266 { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH, 4267 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1, 4268 .access = PL3_RW, .type = ARM_CP_CONST, 4269 .resetvalue = 0 }, 4270 { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64, 4271 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0, 4272 .access = PL3_W, .type = ARM_CP_NO_RAW, 4273 .writefn = tlbi_aa64_alle3is_write }, 4274 { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64, 4275 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1, 4276 .access = PL3_W, .type = ARM_CP_NO_RAW, 4277 .writefn = tlbi_aa64_vae3is_write }, 4278 { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64, 4279 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5, 4280 .access = PL3_W, .type = ARM_CP_NO_RAW, 4281 .writefn = tlbi_aa64_vae3is_write }, 4282 { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64, 4283 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0, 4284 .access = PL3_W, .type = ARM_CP_NO_RAW, 4285 .writefn = tlbi_aa64_alle3_write }, 4286 { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64, 4287 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1, 4288 .access = PL3_W, .type = ARM_CP_NO_RAW, 4289 .writefn = tlbi_aa64_vae3_write }, 4290 { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64, 4291 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5, 4292 .access = PL3_W, .type = ARM_CP_NO_RAW, 4293 .writefn = tlbi_aa64_vae3_write }, 4294 REGINFO_SENTINEL 4295 }; 4296 4297 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, 
4298 bool isread) 4299 { 4300 /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64, 4301 * but the AArch32 CTR has its own reginfo struct) 4302 */ 4303 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) { 4304 return CP_ACCESS_TRAP; 4305 } 4306 return CP_ACCESS_OK; 4307 } 4308 4309 static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri, 4310 uint64_t value) 4311 { 4312 /* Writes to OSLAR_EL1 may update the OS lock status, which can be 4313 * read via a bit in OSLSR_EL1. 4314 */ 4315 int oslock; 4316 4317 if (ri->state == ARM_CP_STATE_AA32) { 4318 oslock = (value == 0xC5ACCE55); 4319 } else { 4320 oslock = value & 1; 4321 } 4322 4323 env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock); 4324 } 4325 4326 static const ARMCPRegInfo debug_cp_reginfo[] = { 4327 /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped 4328 * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1; 4329 * unlike DBGDRAR it is never accessible from EL0. 4330 * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64 4331 * accessor. 4332 */ 4333 { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0, 4334 .access = PL0_R, .accessfn = access_tdra, 4335 .type = ARM_CP_CONST, .resetvalue = 0 }, 4336 { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64, 4337 .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 4338 .access = PL1_R, .accessfn = access_tdra, 4339 .type = ARM_CP_CONST, .resetvalue = 0 }, 4340 { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 4341 .access = PL0_R, .accessfn = access_tdra, 4342 .type = ARM_CP_CONST, .resetvalue = 0 }, 4343 /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */ 4344 { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH, 4345 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 4346 .access = PL1_RW, .accessfn = access_tda, 4347 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), 4348 .resetvalue = 0 }, 4349 /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1. 4350 * We don't implement the configurable EL0 access. 4351 */ 4352 { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH, 4353 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 4354 .type = ARM_CP_ALIAS, 4355 .access = PL1_R, .accessfn = access_tda, 4356 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), }, 4357 { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH, 4358 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4, 4359 .access = PL1_W, .type = ARM_CP_NO_RAW, 4360 .accessfn = access_tdosa, 4361 .writefn = oslar_write }, 4362 { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH, 4363 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4, 4364 .access = PL1_R, .resetvalue = 10, 4365 .accessfn = access_tdosa, 4366 .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) }, 4367 /* Dummy OSDLR_EL1: 32-bit Linux will read this */ 4368 { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH, 4369 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4, 4370 .access = PL1_RW, .accessfn = access_tdosa, 4371 .type = ARM_CP_NOP }, 4372 /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't 4373 * implement vector catch debug events yet. 
4374 */ 4375 { .name = "DBGVCR", 4376 .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 4377 .access = PL1_RW, .accessfn = access_tda, 4378 .type = ARM_CP_NOP }, 4379 /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor 4380 * to save and restore a 32-bit guest's DBGVCR) 4381 */ 4382 { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, 4383 .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, 4384 .access = PL2_RW, .accessfn = access_tda, 4385 .type = ARM_CP_NOP }, 4386 /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications 4387 * Channel but Linux may try to access this register. The 32-bit 4388 * alias is DBGDCCINT. 4389 */ 4390 { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH, 4391 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 4392 .access = PL1_RW, .accessfn = access_tda, 4393 .type = ARM_CP_NOP }, 4394 REGINFO_SENTINEL 4395 }; 4396 4397 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { 4398 /* 64 bit access versions of the (dummy) debug registers */ 4399 { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0, 4400 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, 4401 { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0, 4402 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, 4403 REGINFO_SENTINEL 4404 }; 4405 4406 /* Return the exception level to which exceptions should be taken 4407 * via SVEAccessTrap. If an exception should be routed through 4408 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should 4409 * take care of raising that exception. 4410 * C.f. the ARM pseudocode function CheckSVEEnabled. 4411 */ 4412 int sve_exception_el(CPUARMState *env, int el) 4413 { 4414 #ifndef CONFIG_USER_ONLY 4415 if (el <= 1) { 4416 bool disabled = false; 4417 4418 /* The CPACR.ZEN controls traps to EL1: 4419 * 0, 2 : trap EL0 and EL1 accesses 4420 * 1 : trap only EL0 accesses 4421 * 3 : trap no accesses 4422 */ 4423 if (!extract32(env->cp15.cpacr_el1, 16, 1)) { 4424 disabled = true; 4425 } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) { 4426 disabled = el == 0; 4427 } 4428 if (disabled) { 4429 /* route_to_el2 */ 4430 return (arm_feature(env, ARM_FEATURE_EL2) 4431 && !arm_is_secure(env) 4432 && (env->cp15.hcr_el2 & HCR_TGE) ? 2 : 1); 4433 } 4434 4435 /* Check CPACR.FPEN. */ 4436 if (!extract32(env->cp15.cpacr_el1, 20, 1)) { 4437 disabled = true; 4438 } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) { 4439 disabled = el == 0; 4440 } 4441 if (disabled) { 4442 return 0; 4443 } 4444 } 4445 4446 /* CPTR_EL2. Since TZ and TFP are positive, 4447 * they will be zero when EL2 is not present. 4448 */ 4449 if (el <= 2 && !arm_is_secure_below_el3(env)) { 4450 if (env->cp15.cptr_el[2] & CPTR_TZ) { 4451 return 2; 4452 } 4453 if (env->cp15.cptr_el[2] & CPTR_TFP) { 4454 return 0; 4455 } 4456 } 4457 4458 /* CPTR_EL3. Since EZ is negative we must check for EL3. */ 4459 if (arm_feature(env, ARM_FEATURE_EL3) 4460 && !(env->cp15.cptr_el[3] & CPTR_EZ)) { 4461 return 3; 4462 } 4463 #endif 4464 return 0; 4465 } 4466 4467 /* 4468 * Given that SVE is enabled, return the vector length for EL. 
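* The result uses the ZCR_ELx.LEN encoding, i.e. (number of 128-bit quadwords) - 1: it starts from the CPU's maximum and is clamped by the applicable ZCR_ELx.LEN fields.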
4469 */ 4470 uint32_t sve_zcr_len_for_el(CPUARMState *env, int el) 4471 { 4472 ARMCPU *cpu = arm_env_get_cpu(env); 4473 uint32_t zcr_len = cpu->sve_max_vq - 1; 4474 4475 if (el <= 1) { 4476 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]); 4477 } 4478 if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) { 4479 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]); 4480 } 4481 if (el < 3 && arm_feature(env, ARM_FEATURE_EL3)) { 4482 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]); 4483 } 4484 return zcr_len; 4485 } 4486 4487 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4488 uint64_t value) 4489 { 4490 int cur_el = arm_current_el(env); 4491 int old_len = sve_zcr_len_for_el(env, cur_el); 4492 int new_len; 4493 4494 /* Bits other than [3:0] are RAZ/WI. */ 4495 raw_write(env, ri, value & 0xf); 4496 4497 /* 4498 * Because we arrived here, we know both FP and SVE are enabled; 4499 * otherwise we would have trapped access to the ZCR_ELn register. 4500 */ 4501 new_len = sve_zcr_len_for_el(env, cur_el); 4502 if (new_len < old_len) { 4503 aarch64_sve_narrow_vq(env, new_len + 1); 4504 } 4505 } 4506 4507 static const ARMCPRegInfo zcr_el1_reginfo = { 4508 .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64, 4509 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0, 4510 .access = PL1_RW, .type = ARM_CP_SVE, 4511 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]), 4512 .writefn = zcr_write, .raw_writefn = raw_write 4513 }; 4514 4515 static const ARMCPRegInfo zcr_el2_reginfo = { 4516 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, 4517 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, 4518 .access = PL2_RW, .type = ARM_CP_SVE, 4519 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]), 4520 .writefn = zcr_write, .raw_writefn = raw_write 4521 }; 4522 4523 static const ARMCPRegInfo zcr_no_el2_reginfo = { 4524 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, 4525 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, 4526 .access = PL2_RW, .type = ARM_CP_SVE, 4527 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore 4528 }; 4529 4530 static const ARMCPRegInfo zcr_el3_reginfo = { 4531 .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64, 4532 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0, 4533 .access = PL3_RW, .type = ARM_CP_SVE, 4534 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]), 4535 .writefn = zcr_write, .raw_writefn = raw_write 4536 }; 4537 4538 void hw_watchpoint_update(ARMCPU *cpu, int n) 4539 { 4540 CPUARMState *env = &cpu->env; 4541 vaddr len = 0; 4542 vaddr wvr = env->cp15.dbgwvr[n]; 4543 uint64_t wcr = env->cp15.dbgwcr[n]; 4544 int mask; 4545 int flags = BP_CPU | BP_STOP_BEFORE_ACCESS; 4546 4547 if (env->cpu_watchpoint[n]) { 4548 cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]); 4549 env->cpu_watchpoint[n] = NULL; 4550 } 4551 4552 if (!extract64(wcr, 0, 1)) { 4553 /* E bit clear : watchpoint disabled */ 4554 return; 4555 } 4556 4557 switch (extract64(wcr, 3, 2)) { 4558 case 0: 4559 /* LSC 00 is reserved and must behave as if the wp is disabled */ 4560 return; 4561 case 1: 4562 flags |= BP_MEM_READ; 4563 break; 4564 case 2: 4565 flags |= BP_MEM_WRITE; 4566 break; 4567 case 3: 4568 flags |= BP_MEM_ACCESS; 4569 break; 4570 } 4571 4572 /* Attempts to use both MASK and BAS fields simultaneously are 4573 * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case, 4574 * thus generating a watchpoint for every byte in the masked region. 
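* (A non-zero MASK value N selects a 2^N-byte naturally aligned region; BAS instead selects individual bytes within the word or doubleword at WVR.)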
4575 */ 4576 mask = extract64(wcr, 24, 4); 4577 if (mask == 1 || mask == 2) { 4578 /* Reserved values of MASK; we must act as if the mask value was 4579 * some non-reserved value, or as if the watchpoint were disabled. 4580 * We choose the latter. 4581 */ 4582 return; 4583 } else if (mask) { 4584 /* Watchpoint covers an aligned area up to 2GB in size */ 4585 len = 1ULL << mask; 4586 /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE 4587 * whether the watchpoint fires when the unmasked bits match; we opt 4588 * to generate the exceptions. 4589 */ 4590 wvr &= ~(len - 1); 4591 } else { 4592 /* Watchpoint covers bytes defined by the byte address select bits */ 4593 int bas = extract64(wcr, 5, 8); 4594 int basstart; 4595 4596 if (bas == 0) { 4597 /* This must act as if the watchpoint is disabled */ 4598 return; 4599 } 4600 4601 if (extract64(wvr, 2, 1)) { 4602 /* Deprecated case of an only 4-aligned address. BAS[7:4] are 4603 * ignored, and BAS[3:0] define which bytes to watch. 4604 */ 4605 bas &= 0xf; 4606 } 4607 /* The BAS bits are supposed to be programmed to indicate a contiguous 4608 * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether 4609 * we fire for each byte in the word/doubleword addressed by the WVR. 4610 * We choose to ignore any non-zero bits after the first range of 1s. 4611 */ 4612 basstart = ctz32(bas); 4613 len = cto32(bas >> basstart); 4614 wvr += basstart; 4615 } 4616 4617 cpu_watchpoint_insert(CPU(cpu), wvr, len, flags, 4618 &env->cpu_watchpoint[n]); 4619 } 4620 4621 void hw_watchpoint_update_all(ARMCPU *cpu) 4622 { 4623 int i; 4624 CPUARMState *env = &cpu->env; 4625 4626 /* Completely clear out existing QEMU watchpoints and our array, to 4627 * avoid possible stale entries following migration load. 4628 */ 4629 cpu_watchpoint_remove_all(CPU(cpu), BP_CPU); 4630 memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint)); 4631 4632 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) { 4633 hw_watchpoint_update(cpu, i); 4634 } 4635 } 4636 4637 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4638 uint64_t value) 4639 { 4640 ARMCPU *cpu = arm_env_get_cpu(env); 4641 int i = ri->crm; 4642 4643 /* Bits [63:49] are hardwired to the value of bit [48]; that is, the 4644 * register reads and behaves as if values written are sign extended. 4645 * Bits [1:0] are RES0. 
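* (So the store below keeps bits [48:2] of the written value, replicates bit 48 into bits [63:49], and clears bits [1:0].)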
4646 */ 4647 value = sextract64(value, 0, 49) & ~3ULL; 4648 4649 raw_write(env, ri, value); 4650 hw_watchpoint_update(cpu, i); 4651 } 4652 4653 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4654 uint64_t value) 4655 { 4656 ARMCPU *cpu = arm_env_get_cpu(env); 4657 int i = ri->crm; 4658 4659 raw_write(env, ri, value); 4660 hw_watchpoint_update(cpu, i); 4661 } 4662 4663 void hw_breakpoint_update(ARMCPU *cpu, int n) 4664 { 4665 CPUARMState *env = &cpu->env; 4666 uint64_t bvr = env->cp15.dbgbvr[n]; 4667 uint64_t bcr = env->cp15.dbgbcr[n]; 4668 vaddr addr; 4669 int bt; 4670 int flags = BP_CPU; 4671 4672 if (env->cpu_breakpoint[n]) { 4673 cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]); 4674 env->cpu_breakpoint[n] = NULL; 4675 } 4676 4677 if (!extract64(bcr, 0, 1)) { 4678 /* E bit clear : breakpoint disabled */ 4679 return; 4680 } 4681 4682 bt = extract64(bcr, 20, 4); 4683 4684 switch (bt) { 4685 case 4: /* unlinked address mismatch (reserved if AArch64) */ 4686 case 5: /* linked address mismatch (reserved if AArch64) */ 4687 qemu_log_mask(LOG_UNIMP, 4688 "arm: address mismatch breakpoint types not implemented\n"); 4689 return; 4690 case 0: /* unlinked address match */ 4691 case 1: /* linked address match */ 4692 { 4693 /* Bits [63:49] are hardwired to the value of bit [48]; that is, 4694 * we behave as if the register was sign extended. Bits [1:0] are 4695 * RES0. The BAS field is used to allow setting breakpoints on 16 4696 * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether 4697 * a bp will fire if the addresses covered by the bp and the addresses 4698 * covered by the insn overlap but the insn doesn't start at the 4699 * start of the bp address range. We choose to require the insn and 4700 * the bp to have the same address. The constraints on writing to 4701 * BAS enforced in dbgbcr_write mean we have only four cases: 4702 * 0b0000 => no breakpoint 4703 * 0b0011 => breakpoint on addr 4704 * 0b1100 => breakpoint on addr + 2 4705 * 0b1111 => breakpoint on addr 4706 * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c). 4707 */ 4708 int bas = extract64(bcr, 5, 4); 4709 addr = sextract64(bvr, 0, 49) & ~3ULL; 4710 if (bas == 0) { 4711 return; 4712 } 4713 if (bas == 0xc) { 4714 addr += 2; 4715 } 4716 break; 4717 } 4718 case 2: /* unlinked context ID match */ 4719 case 8: /* unlinked VMID match (reserved if no EL2) */ 4720 case 10: /* unlinked context ID and VMID match (reserved if no EL2) */ 4721 qemu_log_mask(LOG_UNIMP, 4722 "arm: unlinked context breakpoint types not implemented\n"); 4723 return; 4724 case 9: /* linked VMID match (reserved if no EL2) */ 4725 case 11: /* linked context ID and VMID match (reserved if no EL2) */ 4726 case 3: /* linked context ID match */ 4727 default: 4728 /* We must generate no events for Linked context matches (unless 4729 * they are linked to by some other bp/wp, which is handled in 4730 * updates for the linking bp/wp). We choose to also generate no events 4731 * for reserved values. 4732 */ 4733 return; 4734 } 4735 4736 cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]); 4737 } 4738 4739 void hw_breakpoint_update_all(ARMCPU *cpu) 4740 { 4741 int i; 4742 CPUARMState *env = &cpu->env; 4743 4744 /* Completely clear out existing QEMU breakpoints and our array, to 4745 * avoid possible stale entries following migration load.
4746 */ 4747 cpu_breakpoint_remove_all(CPU(cpu), BP_CPU); 4748 memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint)); 4749 4750 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) { 4751 hw_breakpoint_update(cpu, i); 4752 } 4753 } 4754 4755 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4756 uint64_t value) 4757 { 4758 ARMCPU *cpu = arm_env_get_cpu(env); 4759 int i = ri->crm; 4760 4761 raw_write(env, ri, value); 4762 hw_breakpoint_update(cpu, i); 4763 } 4764 4765 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4766 uint64_t value) 4767 { 4768 ARMCPU *cpu = arm_env_get_cpu(env); 4769 int i = ri->crm; 4770 4771 /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only 4772 * copy of BAS[0]. 4773 */ 4774 value = deposit64(value, 6, 1, extract64(value, 5, 1)); 4775 value = deposit64(value, 8, 1, extract64(value, 7, 1)); 4776 4777 raw_write(env, ri, value); 4778 hw_breakpoint_update(cpu, i); 4779 } 4780 4781 static void define_debug_regs(ARMCPU *cpu) 4782 { 4783 /* Define v7 and v8 architectural debug registers. 4784 * These are just dummy implementations for now. 4785 */ 4786 int i; 4787 int wrps, brps, ctx_cmps; 4788 ARMCPRegInfo dbgdidr = { 4789 .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, 4790 .access = PL0_R, .accessfn = access_tda, 4791 .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr, 4792 }; 4793 4794 /* Note that all these register fields hold "number of Xs minus 1". */ 4795 brps = extract32(cpu->dbgdidr, 24, 4); 4796 wrps = extract32(cpu->dbgdidr, 28, 4); 4797 ctx_cmps = extract32(cpu->dbgdidr, 20, 4); 4798 4799 assert(ctx_cmps <= brps); 4800 4801 /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties 4802 * of the debug registers such as number of breakpoints; 4803 * check that if they both exist then they agree. 
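* For example, a CPU with six breakpoints must report 5 in both DBGDIDR.BRPs and ID_AA64DFR0_EL1.BRPs.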
4804 */ 4805 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { 4806 assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps); 4807 assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps); 4808 assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps); 4809 } 4810 4811 define_one_arm_cp_reg(cpu, &dbgdidr); 4812 define_arm_cp_regs(cpu, debug_cp_reginfo); 4813 4814 if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { 4815 define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); 4816 } 4817 4818 for (i = 0; i < brps + 1; i++) { 4819 ARMCPRegInfo dbgregs[] = { 4820 { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH, 4821 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4, 4822 .access = PL1_RW, .accessfn = access_tda, 4823 .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]), 4824 .writefn = dbgbvr_write, .raw_writefn = raw_write 4825 }, 4826 { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH, 4827 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5, 4828 .access = PL1_RW, .accessfn = access_tda, 4829 .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]), 4830 .writefn = dbgbcr_write, .raw_writefn = raw_write 4831 }, 4832 REGINFO_SENTINEL 4833 }; 4834 define_arm_cp_regs(cpu, dbgregs); 4835 } 4836 4837 for (i = 0; i < wrps + 1; i++) { 4838 ARMCPRegInfo dbgregs[] = { 4839 { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH, 4840 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6, 4841 .access = PL1_RW, .accessfn = access_tda, 4842 .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]), 4843 .writefn = dbgwvr_write, .raw_writefn = raw_write 4844 }, 4845 { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH, 4846 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7, 4847 .access = PL1_RW, .accessfn = access_tda, 4848 .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]), 4849 .writefn = dbgwcr_write, .raw_writefn = raw_write 4850 }, 4851 REGINFO_SENTINEL 4852 }; 4853 define_arm_cp_regs(cpu, dbgregs); 4854 } 4855 } 4856 4857 /* We don't know until after realize whether there's a GICv3 4858 * attached, and that is what registers the gicv3 sysregs. 4859 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1 4860 * at runtime. 4861 */ 4862 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) 4863 { 4864 ARMCPU *cpu = arm_env_get_cpu(env); 4865 uint64_t pfr1 = cpu->id_pfr1; 4866 4867 if (env->gicv3state) { 4868 pfr1 |= 1 << 28; 4869 } 4870 return pfr1; 4871 } 4872 4873 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) 4874 { 4875 ARMCPU *cpu = arm_env_get_cpu(env); 4876 uint64_t pfr0 = cpu->id_aa64pfr0; 4877 4878 if (env->gicv3state) { 4879 pfr0 |= 1 << 24; 4880 } 4881 return pfr0; 4882 } 4883 4884 void register_cp_regs_for_features(ARMCPU *cpu) 4885 { 4886 /* Register all the coprocessor registers based on feature bits */ 4887 CPUARMState *env = &cpu->env; 4888 if (arm_feature(env, ARM_FEATURE_M)) { 4889 /* M profile has no coprocessor registers */ 4890 return; 4891 } 4892 4893 define_arm_cp_regs(cpu, cp_reginfo); 4894 if (!arm_feature(env, ARM_FEATURE_V8)) { 4895 /* Must go early as it is full of wildcards that may be 4896 * overridden by later definitions. 
4897 */ 4898 define_arm_cp_regs(cpu, not_v8_cp_reginfo); 4899 } 4900 4901 if (arm_feature(env, ARM_FEATURE_V6)) { 4902 /* The ID registers all have impdef reset values */ 4903 ARMCPRegInfo v6_idregs[] = { 4904 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH, 4905 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 4906 .access = PL1_R, .type = ARM_CP_CONST, 4907 .resetvalue = cpu->id_pfr0 }, 4908 /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know 4909 * the value of the GIC field until after we define these regs. 4910 */ 4911 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH, 4912 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1, 4913 .access = PL1_R, .type = ARM_CP_NO_RAW, 4914 .readfn = id_pfr1_read, 4915 .writefn = arm_cp_write_ignore }, 4916 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH, 4917 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2, 4918 .access = PL1_R, .type = ARM_CP_CONST, 4919 .resetvalue = cpu->id_dfr0 }, 4920 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH, 4921 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3, 4922 .access = PL1_R, .type = ARM_CP_CONST, 4923 .resetvalue = cpu->id_afr0 }, 4924 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH, 4925 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4, 4926 .access = PL1_R, .type = ARM_CP_CONST, 4927 .resetvalue = cpu->id_mmfr0 }, 4928 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH, 4929 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5, 4930 .access = PL1_R, .type = ARM_CP_CONST, 4931 .resetvalue = cpu->id_mmfr1 }, 4932 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH, 4933 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6, 4934 .access = PL1_R, .type = ARM_CP_CONST, 4935 .resetvalue = cpu->id_mmfr2 }, 4936 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH, 4937 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7, 4938 .access = PL1_R, .type = ARM_CP_CONST, 4939 .resetvalue = cpu->id_mmfr3 }, 4940 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH, 4941 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 4942 .access = PL1_R, .type = ARM_CP_CONST, 4943 .resetvalue = cpu->id_isar0 }, 4944 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH, 4945 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1, 4946 .access = PL1_R, .type = ARM_CP_CONST, 4947 .resetvalue = cpu->id_isar1 }, 4948 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH, 4949 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 4950 .access = PL1_R, .type = ARM_CP_CONST, 4951 .resetvalue = cpu->id_isar2 }, 4952 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH, 4953 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3, 4954 .access = PL1_R, .type = ARM_CP_CONST, 4955 .resetvalue = cpu->id_isar3 }, 4956 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH, 4957 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4, 4958 .access = PL1_R, .type = ARM_CP_CONST, 4959 .resetvalue = cpu->id_isar4 }, 4960 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH, 4961 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5, 4962 .access = PL1_R, .type = ARM_CP_CONST, 4963 .resetvalue = cpu->id_isar5 }, 4964 { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH, 4965 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6, 4966 .access = PL1_R, .type = ARM_CP_CONST, 4967 .resetvalue = cpu->id_mmfr4 }, 4968 { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH, 4969 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7, 4970 .access = PL1_R, .type = ARM_CP_CONST, 4971 .resetvalue = cpu->id_isar6 }, 4972 REGINFO_SENTINEL 4973 }; 4974 define_arm_cp_regs(cpu, v6_idregs); 4975 
define_arm_cp_regs(cpu, v6_cp_reginfo); 4976 } else { 4977 define_arm_cp_regs(cpu, not_v6_cp_reginfo); 4978 } 4979 if (arm_feature(env, ARM_FEATURE_V6K)) { 4980 define_arm_cp_regs(cpu, v6k_cp_reginfo); 4981 } 4982 if (arm_feature(env, ARM_FEATURE_V7MP) && 4983 !arm_feature(env, ARM_FEATURE_PMSA)) { 4984 define_arm_cp_regs(cpu, v7mp_cp_reginfo); 4985 } 4986 if (arm_feature(env, ARM_FEATURE_V7)) { 4987 /* v7 performance monitor control register: same implementor 4988 * field as main ID register, and we implement only the cycle 4989 * count register. 4990 */ 4991 #ifndef CONFIG_USER_ONLY 4992 ARMCPRegInfo pmcr = { 4993 .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0, 4994 .access = PL0_RW, 4995 .type = ARM_CP_IO | ARM_CP_ALIAS, 4996 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr), 4997 .accessfn = pmreg_access, .writefn = pmcr_write, 4998 .raw_writefn = raw_write, 4999 }; 5000 ARMCPRegInfo pmcr64 = { 5001 .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64, 5002 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0, 5003 .access = PL0_RW, .accessfn = pmreg_access, 5004 .type = ARM_CP_IO, 5005 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), 5006 .resetvalue = cpu->midr & 0xff000000, 5007 .writefn = pmcr_write, .raw_writefn = raw_write, 5008 }; 5009 define_one_arm_cp_reg(cpu, &pmcr); 5010 define_one_arm_cp_reg(cpu, &pmcr64); 5011 #endif 5012 ARMCPRegInfo clidr = { 5013 .name = "CLIDR", .state = ARM_CP_STATE_BOTH, 5014 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1, 5015 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr 5016 }; 5017 define_one_arm_cp_reg(cpu, &clidr); 5018 define_arm_cp_regs(cpu, v7_cp_reginfo); 5019 define_debug_regs(cpu); 5020 } else { 5021 define_arm_cp_regs(cpu, not_v7_cp_reginfo); 5022 } 5023 if (arm_feature(env, ARM_FEATURE_V8)) { 5024 /* AArch64 ID registers, which all have impdef reset values. 5025 * Note that within the ID register ranges the unused slots 5026 * must all RAZ, not UNDEF; future architecture versions may 5027 * define new registers here. 5028 */ 5029 ARMCPRegInfo v8_idregs[] = { 5030 /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't 5031 * know the right value for the GIC field until after we 5032 * define these regs. 5033 */ 5034 { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64, 5035 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0, 5036 .access = PL1_R, .type = ARM_CP_NO_RAW, 5037 .readfn = id_aa64pfr0_read, 5038 .writefn = arm_cp_write_ignore }, 5039 { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64, 5040 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1, 5041 .access = PL1_R, .type = ARM_CP_CONST, 5042 .resetvalue = cpu->id_aa64pfr1}, 5043 { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5044 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2, 5045 .access = PL1_R, .type = ARM_CP_CONST, 5046 .resetvalue = 0 }, 5047 { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5048 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3, 5049 .access = PL1_R, .type = ARM_CP_CONST, 5050 .resetvalue = 0 }, 5051 { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64, 5052 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4, 5053 .access = PL1_R, .type = ARM_CP_CONST, 5054 /* At present, only SVEver == 0 is defined anyway. 
*/ 5055 .resetvalue = 0 }, 5056 { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5057 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5, 5058 .access = PL1_R, .type = ARM_CP_CONST, 5059 .resetvalue = 0 }, 5060 { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5061 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6, 5062 .access = PL1_R, .type = ARM_CP_CONST, 5063 .resetvalue = 0 }, 5064 { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5065 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7, 5066 .access = PL1_R, .type = ARM_CP_CONST, 5067 .resetvalue = 0 }, 5068 { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64, 5069 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0, 5070 .access = PL1_R, .type = ARM_CP_CONST, 5071 .resetvalue = cpu->id_aa64dfr0 }, 5072 { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64, 5073 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1, 5074 .access = PL1_R, .type = ARM_CP_CONST, 5075 .resetvalue = cpu->id_aa64dfr1 }, 5076 { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5077 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2, 5078 .access = PL1_R, .type = ARM_CP_CONST, 5079 .resetvalue = 0 }, 5080 { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5081 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3, 5082 .access = PL1_R, .type = ARM_CP_CONST, 5083 .resetvalue = 0 }, 5084 { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64, 5085 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4, 5086 .access = PL1_R, .type = ARM_CP_CONST, 5087 .resetvalue = cpu->id_aa64afr0 }, 5088 { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64, 5089 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5, 5090 .access = PL1_R, .type = ARM_CP_CONST, 5091 .resetvalue = cpu->id_aa64afr1 }, 5092 { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5093 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6, 5094 .access = PL1_R, .type = ARM_CP_CONST, 5095 .resetvalue = 0 }, 5096 { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5097 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7, 5098 .access = PL1_R, .type = ARM_CP_CONST, 5099 .resetvalue = 0 }, 5100 { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64, 5101 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0, 5102 .access = PL1_R, .type = ARM_CP_CONST, 5103 .resetvalue = cpu->id_aa64isar0 }, 5104 { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64, 5105 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1, 5106 .access = PL1_R, .type = ARM_CP_CONST, 5107 .resetvalue = cpu->id_aa64isar1 }, 5108 { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5109 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2, 5110 .access = PL1_R, .type = ARM_CP_CONST, 5111 .resetvalue = 0 }, 5112 { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5113 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3, 5114 .access = PL1_R, .type = ARM_CP_CONST, 5115 .resetvalue = 0 }, 5116 { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5117 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4, 5118 .access = PL1_R, .type = ARM_CP_CONST, 5119 .resetvalue = 0 }, 5120 { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5121 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5, 5122 .access = PL1_R, .type = ARM_CP_CONST, 5123 .resetvalue = 0 }, 5124 { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5125 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6, 5126 
.access = PL1_R, .type = ARM_CP_CONST, 5127 .resetvalue = 0 }, 5128 { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5129 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7, 5130 .access = PL1_R, .type = ARM_CP_CONST, 5131 .resetvalue = 0 }, 5132 { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64, 5133 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 5134 .access = PL1_R, .type = ARM_CP_CONST, 5135 .resetvalue = cpu->id_aa64mmfr0 }, 5136 { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64, 5137 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1, 5138 .access = PL1_R, .type = ARM_CP_CONST, 5139 .resetvalue = cpu->id_aa64mmfr1 }, 5140 { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5141 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2, 5142 .access = PL1_R, .type = ARM_CP_CONST, 5143 .resetvalue = 0 }, 5144 { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5145 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3, 5146 .access = PL1_R, .type = ARM_CP_CONST, 5147 .resetvalue = 0 }, 5148 { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5149 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4, 5150 .access = PL1_R, .type = ARM_CP_CONST, 5151 .resetvalue = 0 }, 5152 { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5153 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5, 5154 .access = PL1_R, .type = ARM_CP_CONST, 5155 .resetvalue = 0 }, 5156 { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5157 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6, 5158 .access = PL1_R, .type = ARM_CP_CONST, 5159 .resetvalue = 0 }, 5160 { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5161 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7, 5162 .access = PL1_R, .type = ARM_CP_CONST, 5163 .resetvalue = 0 }, 5164 { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64, 5165 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0, 5166 .access = PL1_R, .type = ARM_CP_CONST, 5167 .resetvalue = cpu->mvfr0 }, 5168 { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64, 5169 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1, 5170 .access = PL1_R, .type = ARM_CP_CONST, 5171 .resetvalue = cpu->mvfr1 }, 5172 { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64, 5173 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, 5174 .access = PL1_R, .type = ARM_CP_CONST, 5175 .resetvalue = cpu->mvfr2 }, 5176 { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5177 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3, 5178 .access = PL1_R, .type = ARM_CP_CONST, 5179 .resetvalue = 0 }, 5180 { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5181 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4, 5182 .access = PL1_R, .type = ARM_CP_CONST, 5183 .resetvalue = 0 }, 5184 { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5185 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5, 5186 .access = PL1_R, .type = ARM_CP_CONST, 5187 .resetvalue = 0 }, 5188 { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5189 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6, 5190 .access = PL1_R, .type = ARM_CP_CONST, 5191 .resetvalue = 0 }, 5192 { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5193 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7, 5194 .access = PL1_R, .type = ARM_CP_CONST, 5195 .resetvalue = 0 }, 5196 { .name = "PMCEID0", .state = ARM_CP_STATE_AA32, 5197 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6, 5198 .access = PL0_R, .accessfn = pmreg_access, .type = 
ARM_CP_CONST, 5199 .resetvalue = cpu->pmceid0 }, 5200 { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64, 5201 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6, 5202 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 5203 .resetvalue = cpu->pmceid0 }, 5204 { .name = "PMCEID1", .state = ARM_CP_STATE_AA32, 5205 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7, 5206 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 5207 .resetvalue = cpu->pmceid1 }, 5208 { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64, 5209 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7, 5210 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 5211 .resetvalue = cpu->pmceid1 }, 5212 REGINFO_SENTINEL 5213 }; 5214 /* RVBAR_EL1 is only implemented if EL1 is the highest EL */ 5215 if (!arm_feature(env, ARM_FEATURE_EL3) && 5216 !arm_feature(env, ARM_FEATURE_EL2)) { 5217 ARMCPRegInfo rvbar = { 5218 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64, 5219 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 5220 .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar 5221 }; 5222 define_one_arm_cp_reg(cpu, &rvbar); 5223 } 5224 define_arm_cp_regs(cpu, v8_idregs); 5225 define_arm_cp_regs(cpu, v8_cp_reginfo); 5226 } 5227 if (arm_feature(env, ARM_FEATURE_EL2)) { 5228 uint64_t vmpidr_def = mpidr_read_val(env); 5229 ARMCPRegInfo vpidr_regs[] = { 5230 { .name = "VPIDR", .state = ARM_CP_STATE_AA32, 5231 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 5232 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5233 .resetvalue = cpu->midr, .type = ARM_CP_ALIAS, 5234 .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) }, 5235 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64, 5236 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 5237 .access = PL2_RW, .resetvalue = cpu->midr, 5238 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, 5239 { .name = "VMPIDR", .state = ARM_CP_STATE_AA32, 5240 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 5241 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5242 .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS, 5243 .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) }, 5244 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64, 5245 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 5246 .access = PL2_RW, 5247 .resetvalue = vmpidr_def, 5248 .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) }, 5249 REGINFO_SENTINEL 5250 }; 5251 define_arm_cp_regs(cpu, vpidr_regs); 5252 define_arm_cp_regs(cpu, el2_cp_reginfo); 5253 if (arm_feature(env, ARM_FEATURE_V8)) { 5254 define_arm_cp_regs(cpu, el2_v8_cp_reginfo); 5255 } 5256 /* RVBAR_EL2 is only implemented if EL2 is the highest EL */ 5257 if (!arm_feature(env, ARM_FEATURE_EL3)) { 5258 ARMCPRegInfo rvbar = { 5259 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64, 5260 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1, 5261 .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar 5262 }; 5263 define_one_arm_cp_reg(cpu, &rvbar); 5264 } 5265 } else { 5266 /* If EL2 is missing but higher ELs are enabled, we need to 5267 * register the no_el2 reginfos. 5268 */ 5269 if (arm_feature(env, ARM_FEATURE_EL3)) { 5270 /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value 5271 * of MIDR_EL1 and MPIDR_EL1. 
5272 */ 5273 ARMCPRegInfo vpidr_regs[] = { 5274 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH, 5275 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 5276 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 5277 .type = ARM_CP_CONST, .resetvalue = cpu->midr, 5278 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, 5279 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH, 5280 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 5281 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 5282 .type = ARM_CP_NO_RAW, 5283 .writefn = arm_cp_write_ignore, .readfn = mpidr_read }, 5284 REGINFO_SENTINEL 5285 }; 5286 define_arm_cp_regs(cpu, vpidr_regs); 5287 define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo); 5288 if (arm_feature(env, ARM_FEATURE_V8)) { 5289 define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo); 5290 } 5291 } 5292 } 5293 if (arm_feature(env, ARM_FEATURE_EL3)) { 5294 define_arm_cp_regs(cpu, el3_cp_reginfo); 5295 ARMCPRegInfo el3_regs[] = { 5296 { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64, 5297 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1, 5298 .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar }, 5299 { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64, 5300 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0, 5301 .access = PL3_RW, 5302 .raw_writefn = raw_write, .writefn = sctlr_write, 5303 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]), 5304 .resetvalue = cpu->reset_sctlr }, 5305 REGINFO_SENTINEL 5306 }; 5307 5308 define_arm_cp_regs(cpu, el3_regs); 5309 } 5310 /* The behaviour of NSACR is sufficiently various that we don't 5311 * try to describe it in a single reginfo: 5312 * if EL3 is 64 bit, then trap to EL3 from S EL1, 5313 * reads as constant 0xc00 from NS EL1 and NS EL2 5314 * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2 5315 * if v7 without EL3, register doesn't exist 5316 * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2 5317 */ 5318 if (arm_feature(env, ARM_FEATURE_EL3)) { 5319 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 5320 ARMCPRegInfo nsacr = { 5321 .name = "NSACR", .type = ARM_CP_CONST, 5322 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 5323 .access = PL1_RW, .accessfn = nsacr_access, 5324 .resetvalue = 0xc00 5325 }; 5326 define_one_arm_cp_reg(cpu, &nsacr); 5327 } else { 5328 ARMCPRegInfo nsacr = { 5329 .name = "NSACR", 5330 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 5331 .access = PL3_RW | PL1_R, 5332 .resetvalue = 0, 5333 .fieldoffset = offsetof(CPUARMState, cp15.nsacr) 5334 }; 5335 define_one_arm_cp_reg(cpu, &nsacr); 5336 } 5337 } else { 5338 if (arm_feature(env, ARM_FEATURE_V8)) { 5339 ARMCPRegInfo nsacr = { 5340 .name = "NSACR", .type = ARM_CP_CONST, 5341 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 5342 .access = PL1_R, 5343 .resetvalue = 0xc00 5344 }; 5345 define_one_arm_cp_reg(cpu, &nsacr); 5346 } 5347 } 5348 5349 if (arm_feature(env, ARM_FEATURE_PMSA)) { 5350 if (arm_feature(env, ARM_FEATURE_V6)) { 5351 /* PMSAv6 not implemented */ 5352 assert(arm_feature(env, ARM_FEATURE_V7)); 5353 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); 5354 define_arm_cp_regs(cpu, pmsav7_cp_reginfo); 5355 } else { 5356 define_arm_cp_regs(cpu, pmsav5_cp_reginfo); 5357 } 5358 } else { 5359 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); 5360 define_arm_cp_regs(cpu, vmsa_cp_reginfo); 5361 } 5362 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) { 5363 define_arm_cp_regs(cpu, t2ee_cp_reginfo); 5364 } 5365 if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { 5366 define_arm_cp_regs(cpu, 
generic_timer_cp_reginfo); 5367 } 5368 if (arm_feature(env, ARM_FEATURE_VAPA)) { 5369 define_arm_cp_regs(cpu, vapa_cp_reginfo); 5370 } 5371 if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) { 5372 define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo); 5373 } 5374 if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) { 5375 define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo); 5376 } 5377 if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) { 5378 define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo); 5379 } 5380 if (arm_feature(env, ARM_FEATURE_OMAPCP)) { 5381 define_arm_cp_regs(cpu, omap_cp_reginfo); 5382 } 5383 if (arm_feature(env, ARM_FEATURE_STRONGARM)) { 5384 define_arm_cp_regs(cpu, strongarm_cp_reginfo); 5385 } 5386 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 5387 define_arm_cp_regs(cpu, xscale_cp_reginfo); 5388 } 5389 if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) { 5390 define_arm_cp_regs(cpu, dummy_c15_cp_reginfo); 5391 } 5392 if (arm_feature(env, ARM_FEATURE_LPAE)) { 5393 define_arm_cp_regs(cpu, lpae_cp_reginfo); 5394 } 5395 /* Slightly awkwardly, the OMAP and StrongARM cores need all of 5396 * cp15 crn=0 to be writes-ignored, whereas for other cores they should 5397 * be read-only (ie write causes UNDEF exception). 5398 */ 5399 { 5400 ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = { 5401 /* Pre-v8 MIDR space. 5402 * Note that the MIDR isn't a simple constant register because 5403 * of the TI925 behaviour where writes to another register can 5404 * cause the MIDR value to change. 5405 * 5406 * Unimplemented registers in the c15 0 0 0 space default to 5407 * MIDR. Define MIDR first as this entire space, then CTR, TCMTR 5408 * and friends override accordingly. 5409 */ 5410 { .name = "MIDR", 5411 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY, 5412 .access = PL1_R, .resetvalue = cpu->midr, 5413 .writefn = arm_cp_write_ignore, .raw_writefn = raw_write, 5414 .readfn = midr_read, 5415 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 5416 .type = ARM_CP_OVERRIDE }, 5417 /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. 
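* One wildcarded (opc2 = CP_ANY) RAZ entry per crm value follows.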
*/ 5418 { .name = "DUMMY", 5419 .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY, 5420 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5421 { .name = "DUMMY", 5422 .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY, 5423 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5424 { .name = "DUMMY", 5425 .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY, 5426 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5427 { .name = "DUMMY", 5428 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY, 5429 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5430 { .name = "DUMMY", 5431 .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY, 5432 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5433 REGINFO_SENTINEL 5434 }; 5435 ARMCPRegInfo id_v8_midr_cp_reginfo[] = { 5436 { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH, 5437 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0, 5438 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr, 5439 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 5440 .readfn = midr_read }, 5441 /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */ 5442 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 5443 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 5444 .access = PL1_R, .resetvalue = cpu->midr }, 5445 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 5446 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7, 5447 .access = PL1_R, .resetvalue = cpu->midr }, 5448 { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH, 5449 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6, 5450 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr }, 5451 REGINFO_SENTINEL 5452 }; 5453 ARMCPRegInfo id_cp_reginfo[] = { 5454 /* These are common to v8 and pre-v8 */ 5455 { .name = "CTR", 5456 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1, 5457 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 5458 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64, 5459 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0, 5460 .access = PL0_R, .accessfn = ctr_el0_access, 5461 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 5462 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */ 5463 { .name = "TCMTR", 5464 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2, 5465 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5466 REGINFO_SENTINEL 5467 }; 5468 /* TLBTR is specific to VMSA */ 5469 ARMCPRegInfo id_tlbtr_reginfo = { 5470 .name = "TLBTR", 5471 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3, 5472 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0, 5473 }; 5474 /* MPUIR is specific to PMSA V6+ */ 5475 ARMCPRegInfo id_mpuir_reginfo = { 5476 .name = "MPUIR", 5477 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 5478 .access = PL1_R, .type = ARM_CP_CONST, 5479 .resetvalue = cpu->pmsav7_dregion << 8 5480 }; 5481 ARMCPRegInfo crn0_wi_reginfo = { 5482 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY, 5483 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W, 5484 .type = ARM_CP_NOP | ARM_CP_OVERRIDE 5485 }; 5486 if (arm_feature(env, ARM_FEATURE_OMAPCP) || 5487 arm_feature(env, ARM_FEATURE_STRONGARM)) { 5488 ARMCPRegInfo *r; 5489 /* Register the blanket "writes ignored" value first to cover the 5490 * whole space. Then update the specific ID registers to allow write 5491 * access, so that they ignore writes rather than causing them to 5492 * UNDEF. 
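* The loops below do this by widening each reginfo's .access from PL1_R to PL1_RW.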
5493 */ 5494 define_one_arm_cp_reg(cpu, &crn0_wi_reginfo); 5495 for (r = id_pre_v8_midr_cp_reginfo; 5496 r->type != ARM_CP_SENTINEL; r++) { 5497 r->access = PL1_RW; 5498 } 5499 for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) { 5500 r->access = PL1_RW; 5501 } 5502 id_mpuir_reginfo.access = PL1_RW; 5503 id_tlbtr_reginfo.access = PL1_RW; 5504 } 5505 if (arm_feature(env, ARM_FEATURE_V8)) { 5506 define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo); 5507 } else { 5508 define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo); 5509 } 5510 define_arm_cp_regs(cpu, id_cp_reginfo); 5511 if (!arm_feature(env, ARM_FEATURE_PMSA)) { 5512 define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo); 5513 } else if (arm_feature(env, ARM_FEATURE_V7)) { 5514 define_one_arm_cp_reg(cpu, &id_mpuir_reginfo); 5515 } 5516 } 5517 5518 if (arm_feature(env, ARM_FEATURE_MPIDR)) { 5519 define_arm_cp_regs(cpu, mpidr_cp_reginfo); 5520 } 5521 5522 if (arm_feature(env, ARM_FEATURE_AUXCR)) { 5523 ARMCPRegInfo auxcr_reginfo[] = { 5524 { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH, 5525 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1, 5526 .access = PL1_RW, .type = ARM_CP_CONST, 5527 .resetvalue = cpu->reset_auxcr }, 5528 { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH, 5529 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1, 5530 .access = PL2_RW, .type = ARM_CP_CONST, 5531 .resetvalue = 0 }, 5532 { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64, 5533 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1, 5534 .access = PL3_RW, .type = ARM_CP_CONST, 5535 .resetvalue = 0 }, 5536 REGINFO_SENTINEL 5537 }; 5538 define_arm_cp_regs(cpu, auxcr_reginfo); 5539 if (arm_feature(env, ARM_FEATURE_V8)) { 5540 /* HACTLR2 maps to ACTLR_EL2[63:32] and is not in ARMv7 */ 5541 ARMCPRegInfo hactlr2_reginfo = { 5542 .name = "HACTLR2", .state = ARM_CP_STATE_AA32, 5543 .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3, 5544 .access = PL2_RW, .type = ARM_CP_CONST, 5545 .resetvalue = 0 5546 }; 5547 define_one_arm_cp_reg(cpu, &hactlr2_reginfo); 5548 } 5549 } 5550 5551 if (arm_feature(env, ARM_FEATURE_CBAR)) { 5552 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 5553 /* 32 bit view is [31:18] 0...0 [43:32]. 
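* For example, a 64-bit reset_cbar of 0x82c000000 gives cbar32 = 0x2c000008. */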
*/ 5554 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18) 5555 | extract64(cpu->reset_cbar, 32, 12); 5556 ARMCPRegInfo cbar_reginfo[] = { 5557 { .name = "CBAR", 5558 .type = ARM_CP_CONST, 5559 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, 5560 .access = PL1_R, .resetvalue = cpu->reset_cbar }, 5561 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64, 5562 .type = ARM_CP_CONST, 5563 .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0, 5564 .access = PL1_R, .resetvalue = cbar32 }, 5565 REGINFO_SENTINEL 5566 }; 5567 /* We don't implement a r/w 64 bit CBAR currently */ 5568 assert(arm_feature(env, ARM_FEATURE_CBAR_RO)); 5569 define_arm_cp_regs(cpu, cbar_reginfo); 5570 } else { 5571 ARMCPRegInfo cbar = { 5572 .name = "CBAR", 5573 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, 5574 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar, 5575 .fieldoffset = offsetof(CPUARMState, 5576 cp15.c15_config_base_address) 5577 }; 5578 if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { 5579 cbar.access = PL1_R; 5580 cbar.fieldoffset = 0; 5581 cbar.type = ARM_CP_CONST; 5582 } 5583 define_one_arm_cp_reg(cpu, &cbar); 5584 } 5585 } 5586 5587 if (arm_feature(env, ARM_FEATURE_VBAR)) { 5588 ARMCPRegInfo vbar_cp_reginfo[] = { 5589 { .name = "VBAR", .state = ARM_CP_STATE_BOTH, 5590 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0, 5591 .access = PL1_RW, .writefn = vbar_write, 5592 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s), 5593 offsetof(CPUARMState, cp15.vbar_ns) }, 5594 .resetvalue = 0 }, 5595 REGINFO_SENTINEL 5596 }; 5597 define_arm_cp_regs(cpu, vbar_cp_reginfo); 5598 } 5599 5600 /* Generic registers whose values depend on the implementation */ 5601 { 5602 ARMCPRegInfo sctlr = { 5603 .name = "SCTLR", .state = ARM_CP_STATE_BOTH, 5604 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 5605 .access = PL1_RW, 5606 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s), 5607 offsetof(CPUARMState, cp15.sctlr_ns) }, 5608 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr, 5609 .raw_writefn = raw_write, 5610 }; 5611 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 5612 /* Normally we would always end the TB on an SCTLR write, but Linux 5613 * arch/arm/mach-pxa/sleep.S expects two instructions following 5614 * an MMU enable to execute from cache. Imitate this behaviour. 
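* ARM_CP_SUPPRESS_TB_END below keeps the current translation block running after the SCTLR write.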
5615 */ 5616 sctlr.type |= ARM_CP_SUPPRESS_TB_END; 5617 } 5618 define_one_arm_cp_reg(cpu, &sctlr); 5619 } 5620 5621 if (arm_feature(env, ARM_FEATURE_SVE)) { 5622 define_one_arm_cp_reg(cpu, &zcr_el1_reginfo); 5623 if (arm_feature(env, ARM_FEATURE_EL2)) { 5624 define_one_arm_cp_reg(cpu, &zcr_el2_reginfo); 5625 } else { 5626 define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo); 5627 } 5628 if (arm_feature(env, ARM_FEATURE_EL3)) { 5629 define_one_arm_cp_reg(cpu, &zcr_el3_reginfo); 5630 } 5631 } 5632 } 5633 5634 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) 5635 { 5636 CPUState *cs = CPU(cpu); 5637 CPUARMState *env = &cpu->env; 5638 5639 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 5640 gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg, 5641 aarch64_fpu_gdb_set_reg, 5642 34, "aarch64-fpu.xml", 0); 5643 } else if (arm_feature(env, ARM_FEATURE_NEON)) { 5644 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 5645 51, "arm-neon.xml", 0); 5646 } else if (arm_feature(env, ARM_FEATURE_VFP3)) { 5647 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 5648 35, "arm-vfp3.xml", 0); 5649 } else if (arm_feature(env, ARM_FEATURE_VFP)) { 5650 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 5651 19, "arm-vfp.xml", 0); 5652 } 5653 gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg, 5654 arm_gen_dynamic_xml(cs), 5655 "system-registers.xml", 0); 5656 } 5657 5658 /* Sort alphabetically by type name, except for "any". */ 5659 static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b) 5660 { 5661 ObjectClass *class_a = (ObjectClass *)a; 5662 ObjectClass *class_b = (ObjectClass *)b; 5663 const char *name_a, *name_b; 5664 5665 name_a = object_class_get_name(class_a); 5666 name_b = object_class_get_name(class_b); 5667 if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) { 5668 return 1; 5669 } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) { 5670 return -1; 5671 } else { 5672 return strcmp(name_a, name_b); 5673 } 5674 } 5675 5676 static void arm_cpu_list_entry(gpointer data, gpointer user_data) 5677 { 5678 ObjectClass *oc = data; 5679 CPUListState *s = user_data; 5680 const char *typename; 5681 char *name; 5682 5683 typename = object_class_get_name(oc); 5684 name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU)); 5685 (*s->cpu_fprintf)(s->file, " %s\n", 5686 name); 5687 g_free(name); 5688 } 5689 5690 void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf) 5691 { 5692 CPUListState s = { 5693 .file = f, 5694 .cpu_fprintf = cpu_fprintf, 5695 }; 5696 GSList *list; 5697 5698 list = object_class_get_list(TYPE_ARM_CPU, false); 5699 list = g_slist_sort(list, arm_cpu_list_compare); 5700 (*cpu_fprintf)(f, "Available CPUs:\n"); 5701 g_slist_foreach(list, arm_cpu_list_entry, &s); 5702 g_slist_free(list); 5703 } 5704 5705 static void arm_cpu_add_definition(gpointer data, gpointer user_data) 5706 { 5707 ObjectClass *oc = data; 5708 CpuDefinitionInfoList **cpu_list = user_data; 5709 CpuDefinitionInfoList *entry; 5710 CpuDefinitionInfo *info; 5711 const char *typename; 5712 5713 typename = object_class_get_name(oc); 5714 info = g_malloc0(sizeof(*info)); 5715 info->name = g_strndup(typename, 5716 strlen(typename) - strlen("-" TYPE_ARM_CPU)); 5717 info->q_typename = g_strdup(typename); 5718 5719 entry = g_malloc0(sizeof(*entry)); 5720 entry->value = info; 5721 entry->next = *cpu_list; 5722 *cpu_list = entry; 5723 } 5724 5725 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp) 5726 { 5727 CpuDefinitionInfoList *cpu_list = NULL; 5728 GSList 
*list; 5729 5730 list = object_class_get_list(TYPE_ARM_CPU, false); 5731 g_slist_foreach(list, arm_cpu_add_definition, &cpu_list); 5732 g_slist_free(list); 5733 5734 return cpu_list; 5735 } 5736 5737 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, 5738 void *opaque, int state, int secstate, 5739 int crm, int opc1, int opc2, 5740 const char *name) 5741 { 5742 /* Private utility function for define_one_arm_cp_reg_with_opaque(): 5743 * add a single reginfo struct to the hash table. 5744 */ 5745 uint32_t *key = g_new(uint32_t, 1); 5746 ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo)); 5747 int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0; 5748 int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0; 5749 5750 r2->name = g_strdup(name); 5751 /* Reset the secure state to the specific incoming state. This is 5752 * necessary as the register may have been defined with both states. 5753 */ 5754 r2->secure = secstate; 5755 5756 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { 5757 /* Register is banked (using both entries in array). 5758 * Overwriting fieldoffset as the array is only used to define 5759 * banked registers but later only fieldoffset is used. 5760 */ 5761 r2->fieldoffset = r->bank_fieldoffsets[ns]; 5762 } 5763 5764 if (state == ARM_CP_STATE_AA32) { 5765 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { 5766 /* If the register is banked then we don't need to migrate or 5767 * reset the 32-bit instance in certain cases: 5768 * 5769 * 1) If the register has both 32-bit and 64-bit instances then we 5770 * can count on the 64-bit instance taking care of the 5771 * non-secure bank. 5772 * 2) If ARMv8 is enabled then we can count on a 64-bit version 5773 * taking care of the secure bank. This requires that separate 5774 * 32 and 64-bit definitions are provided. 5775 */ 5776 if ((r->state == ARM_CP_STATE_BOTH && ns) || 5777 (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) { 5778 r2->type |= ARM_CP_ALIAS; 5779 } 5780 } else if ((secstate != r->secure) && !ns) { 5781 /* The register is not banked so we only want to allow migration of 5782 * the non-secure instance. 5783 */ 5784 r2->type |= ARM_CP_ALIAS; 5785 } 5786 5787 if (r->state == ARM_CP_STATE_BOTH) { 5788 /* We assume it is a cp15 register if the .cp field is left unset. 5789 */ 5790 if (r2->cp == 0) { 5791 r2->cp = 15; 5792 } 5793 5794 #ifdef HOST_WORDS_BIGENDIAN 5795 if (r2->fieldoffset) { 5796 r2->fieldoffset += sizeof(uint32_t); 5797 } 5798 #endif 5799 } 5800 } 5801 if (state == ARM_CP_STATE_AA64) { 5802 /* To allow abbreviation of ARMCPRegInfo 5803 * definitions, we treat cp == 0 as equivalent to 5804 * the value for "standard guest-visible sysreg". 5805 * STATE_BOTH definitions are also always "standard 5806 * sysreg" in their AArch64 view (the .cp value may 5807 * be non-zero for the benefit of the AArch32 view). 
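* For example, an AArch64-only definition that leaves .cp at 0 is still encoded below with CP_REG_ARM64_SYSREG_CP.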
5808 */ 5809 if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) { 5810 r2->cp = CP_REG_ARM64_SYSREG_CP; 5811 } 5812 *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm, 5813 r2->opc0, opc1, opc2); 5814 } else { 5815 *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2); 5816 } 5817 if (opaque) { 5818 r2->opaque = opaque; 5819 } 5820 /* reginfo passed to helpers is correct for the actual access, 5821 * and is never ARM_CP_STATE_BOTH: 5822 */ 5823 r2->state = state; 5824 /* Make sure reginfo passed to helpers for wildcarded regs 5825 * has the correct crm/opc1/opc2 for this reg, not CP_ANY: 5826 */ 5827 r2->crm = crm; 5828 r2->opc1 = opc1; 5829 r2->opc2 = opc2; 5830 /* By convention, for wildcarded registers only the first 5831 * entry is used for migration; the others are marked as 5832 * ALIAS so we don't try to transfer the register 5833 * multiple times. Special registers (ie NOP/WFI) are 5834 * never migratable and not even raw-accessible. 5835 */ 5836 if ((r->type & ARM_CP_SPECIAL)) { 5837 r2->type |= ARM_CP_NO_RAW; 5838 } 5839 if (((r->crm == CP_ANY) && crm != 0) || 5840 ((r->opc1 == CP_ANY) && opc1 != 0) || 5841 ((r->opc2 == CP_ANY) && opc2 != 0)) { 5842 r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB; 5843 } 5844 5845 /* Check that raw accesses are either forbidden or handled. Note that 5846 * we can't assert this earlier because the setup of fieldoffset for 5847 * banked registers has to be done first. 5848 */ 5849 if (!(r2->type & ARM_CP_NO_RAW)) { 5850 assert(!raw_accessors_invalid(r2)); 5851 } 5852 5853 /* Overriding of an existing definition must be explicitly 5854 * requested. 5855 */ 5856 if (!(r->type & ARM_CP_OVERRIDE)) { 5857 ARMCPRegInfo *oldreg; 5858 oldreg = g_hash_table_lookup(cpu->cp_regs, key); 5859 if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) { 5860 fprintf(stderr, "Register redefined: cp=%d %d bit " 5861 "crn=%d crm=%d opc1=%d opc2=%d, " 5862 "was %s, now %s\n", r2->cp, 32 + 32 * is64, 5863 r2->crn, r2->crm, r2->opc1, r2->opc2, 5864 oldreg->name, r2->name); 5865 g_assert_not_reached(); 5866 } 5867 } 5868 g_hash_table_insert(cpu->cp_regs, key, r2); 5869 } 5870 5871 5872 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, 5873 const ARMCPRegInfo *r, void *opaque) 5874 { 5875 /* Define implementations of coprocessor registers. 5876 * We store these in a hashtable because typically 5877 * there are less than 150 registers in a space which 5878 * is 16*16*16*8*8 = 262144 in size. 5879 * Wildcarding is supported for the crm, opc1 and opc2 fields. 5880 * If a register is defined twice then the second definition is 5881 * used, so this can be used to define some generic registers and 5882 * then override them with implementation specific variations. 5883 * At least one of the original and the second definition should 5884 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard 5885 * against accidental use. 5886 * 5887 * The state field defines whether the register is to be 5888 * visible in the AArch32 or AArch64 execution state. If the 5889 * state is set to ARM_CP_STATE_BOTH then we synthesise a 5890 * reginfo structure for the AArch32 view, which sees the lower 5891 * 32 bits of the 64 bit register. 5892 * 5893 * Only registers visible in AArch64 may set r->opc0; opc0 cannot 5894 * be wildcarded. AArch64 registers are always considered to be 64 5895 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of 5896 * the register, if any. 5897 */ 5898 int crm, opc1, opc2, state; 5899 int crmmin = (r->crm == CP_ANY) ? 
0 : r->crm; 5900 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm; 5901 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1; 5902 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1; 5903 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2; 5904 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2; 5905 /* 64 bit registers have only CRm and Opc1 fields */ 5906 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn))); 5907 /* op0 only exists in the AArch64 encodings */ 5908 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0)); 5909 /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */ 5910 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT)); 5911 /* The AArch64 pseudocode CheckSystemAccess() specifies that op1 5912 * encodes a minimum access level for the register. We roll this 5913 * runtime check into our general permission check code, so check 5914 * here that the reginfo's specified permissions are strict enough 5915 * to encompass the generic architectural permission check. 5916 */ 5917 if (r->state != ARM_CP_STATE_AA32) { 5918 int mask = 0; 5919 switch (r->opc1) { 5920 case 0: case 1: case 2: 5921 /* min_EL EL1 */ 5922 mask = PL1_RW; 5923 break; 5924 case 3: 5925 /* min_EL EL0 */ 5926 mask = PL0_RW; 5927 break; 5928 case 4: 5929 /* min_EL EL2 */ 5930 mask = PL2_RW; 5931 break; 5932 case 5: 5933 /* unallocated encoding, so not possible */ 5934 assert(false); 5935 break; 5936 case 6: 5937 /* min_EL EL3 */ 5938 mask = PL3_RW; 5939 break; 5940 case 7: 5941 /* min_EL EL1, secure mode only (we don't check the latter) */ 5942 mask = PL1_RW; 5943 break; 5944 default: 5945 /* broken reginfo with out-of-range opc1 */ 5946 assert(false); 5947 break; 5948 } 5949 /* assert our permissions are not too lax (stricter is fine) */ 5950 assert((r->access & ~mask) == 0); 5951 } 5952 5953 /* Check that the register definition has enough info to handle 5954 * reads and writes if they are permitted. 5955 */ 5956 if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) { 5957 if (r->access & PL3_R) { 5958 assert((r->fieldoffset || 5959 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 5960 r->readfn); 5961 } 5962 if (r->access & PL3_W) { 5963 assert((r->fieldoffset || 5964 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 5965 r->writefn); 5966 } 5967 } 5968 /* Bad type field probably means missing sentinel at end of reg list */ 5969 assert(cptype_valid(r->type)); 5970 for (crm = crmmin; crm <= crmmax; crm++) { 5971 for (opc1 = opc1min; opc1 <= opc1max; opc1++) { 5972 for (opc2 = opc2min; opc2 <= opc2max; opc2++) { 5973 for (state = ARM_CP_STATE_AA32; 5974 state <= ARM_CP_STATE_AA64; state++) { 5975 if (r->state != state && r->state != ARM_CP_STATE_BOTH) { 5976 continue; 5977 } 5978 if (state == ARM_CP_STATE_AA32) { 5979 /* Under AArch32 CP registers can be common 5980 * (same for secure and non-secure world) or banked. 
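* A definition that does not specify a single security state is added twice below, once per state, with the Secure copy's name given an "_S" suffix.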
5981 */ 5982 char *name; 5983 5984 switch (r->secure) { 5985 case ARM_CP_SECSTATE_S: 5986 case ARM_CP_SECSTATE_NS: 5987 add_cpreg_to_hashtable(cpu, r, opaque, state, 5988 r->secure, crm, opc1, opc2, 5989 r->name); 5990 break; 5991 default: 5992 name = g_strdup_printf("%s_S", r->name); 5993 add_cpreg_to_hashtable(cpu, r, opaque, state, 5994 ARM_CP_SECSTATE_S, 5995 crm, opc1, opc2, name); 5996 g_free(name); 5997 add_cpreg_to_hashtable(cpu, r, opaque, state, 5998 ARM_CP_SECSTATE_NS, 5999 crm, opc1, opc2, r->name); 6000 break; 6001 } 6002 } else { 6003 /* AArch64 registers get mapped to non-secure instance 6004 * of AArch32 */ 6005 add_cpreg_to_hashtable(cpu, r, opaque, state, 6006 ARM_CP_SECSTATE_NS, 6007 crm, opc1, opc2, r->name); 6008 } 6009 } 6010 } 6011 } 6012 } 6013 } 6014 6015 void define_arm_cp_regs_with_opaque(ARMCPU *cpu, 6016 const ARMCPRegInfo *regs, void *opaque) 6017 { 6018 /* Define a whole list of registers */ 6019 const ARMCPRegInfo *r; 6020 for (r = regs; r->type != ARM_CP_SENTINEL; r++) { 6021 define_one_arm_cp_reg_with_opaque(cpu, r, opaque); 6022 } 6023 } 6024 6025 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp) 6026 { 6027 return g_hash_table_lookup(cpregs, &encoded_cp); 6028 } 6029 6030 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri, 6031 uint64_t value) 6032 { 6033 /* Helper coprocessor write function for write-ignore registers */ 6034 } 6035 6036 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri) 6037 { 6038 /* Helper coprocessor read function for read-as-zero registers */ 6039 return 0; 6040 } 6041 6042 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque) 6043 { 6044 /* Helper coprocessor reset function for do-nothing-on-reset registers */ 6045 } 6046 6047 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type) 6048 { 6049 /* Return true if it is not valid for us to switch to 6050 * this CPU mode (ie all the UNPREDICTABLE cases in 6051 * the ARM ARM CPSRWriteByInstr pseudocode). 6052 */ 6053 6054 /* Changes to or from Hyp via MSR and CPS are illegal. */ 6055 if (write_type == CPSRWriteByInstr && 6056 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP || 6057 mode == ARM_CPU_MODE_HYP)) { 6058 return 1; 6059 } 6060 6061 switch (mode) { 6062 case ARM_CPU_MODE_USR: 6063 return 0; 6064 case ARM_CPU_MODE_SYS: 6065 case ARM_CPU_MODE_SVC: 6066 case ARM_CPU_MODE_ABT: 6067 case ARM_CPU_MODE_UND: 6068 case ARM_CPU_MODE_IRQ: 6069 case ARM_CPU_MODE_FIQ: 6070 /* Note that we don't implement the IMPDEF NSACR.RFR which in v7 6071 * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.) 6072 */ 6073 /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR 6074 * and CPS are treated as illegal mode changes.
6075 */ 6076 if (write_type == CPSRWriteByInstr && 6077 (env->cp15.hcr_el2 & HCR_TGE) && 6078 (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON && 6079 !arm_is_secure_below_el3(env)) { 6080 return 1; 6081 } 6082 return 0; 6083 case ARM_CPU_MODE_HYP: 6084 return !arm_feature(env, ARM_FEATURE_EL2) 6085 || arm_current_el(env) < 2 || arm_is_secure(env); 6086 case ARM_CPU_MODE_MON: 6087 return arm_current_el(env) < 3; 6088 default: 6089 return 1; 6090 } 6091 } 6092 6093 uint32_t cpsr_read(CPUARMState *env) 6094 { 6095 int ZF; 6096 ZF = (env->ZF == 0); 6097 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) | 6098 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) 6099 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25) 6100 | ((env->condexec_bits & 0xfc) << 8) 6101 | (env->GE << 16) | (env->daif & CPSR_AIF); 6102 } 6103 6104 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, 6105 CPSRWriteType write_type) 6106 { 6107 uint32_t changed_daif; 6108 6109 if (mask & CPSR_NZCV) { 6110 env->ZF = (~val) & CPSR_Z; 6111 env->NF = val; 6112 env->CF = (val >> 29) & 1; 6113 env->VF = (val << 3) & 0x80000000; 6114 } 6115 if (mask & CPSR_Q) 6116 env->QF = ((val & CPSR_Q) != 0); 6117 if (mask & CPSR_T) 6118 env->thumb = ((val & CPSR_T) != 0); 6119 if (mask & CPSR_IT_0_1) { 6120 env->condexec_bits &= ~3; 6121 env->condexec_bits |= (val >> 25) & 3; 6122 } 6123 if (mask & CPSR_IT_2_7) { 6124 env->condexec_bits &= 3; 6125 env->condexec_bits |= (val >> 8) & 0xfc; 6126 } 6127 if (mask & CPSR_GE) { 6128 env->GE = (val >> 16) & 0xf; 6129 } 6130 6131 /* In a V7 implementation that includes the security extensions but does 6132 * not include Virtualization Extensions the SCR.FW and SCR.AW bits control 6133 * whether non-secure software is allowed to change the CPSR_F and CPSR_A 6134 * bits respectively. 6135 * 6136 * In a V8 implementation, it is permitted for privileged software to 6137 * change the CPSR A/F bits regardless of the SCR.AW/FW bits. 6138 */ 6139 if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) && 6140 arm_feature(env, ARM_FEATURE_EL3) && 6141 !arm_feature(env, ARM_FEATURE_EL2) && 6142 !arm_is_secure(env)) { 6143 6144 changed_daif = (env->daif ^ val) & mask; 6145 6146 if (changed_daif & CPSR_A) { 6147 /* Check to see if we are allowed to change the masking of async 6148 * abort exceptions from a non-secure state. 6149 */ 6150 if (!(env->cp15.scr_el3 & SCR_AW)) { 6151 qemu_log_mask(LOG_GUEST_ERROR, 6152 "Ignoring attempt to switch CPSR_A flag from " 6153 "non-secure world with SCR.AW bit clear\n"); 6154 mask &= ~CPSR_A; 6155 } 6156 } 6157 6158 if (changed_daif & CPSR_F) { 6159 /* Check to see if we are allowed to change the masking of FIQ 6160 * exceptions from a non-secure state. 6161 */ 6162 if (!(env->cp15.scr_el3 & SCR_FW)) { 6163 qemu_log_mask(LOG_GUEST_ERROR, 6164 "Ignoring attempt to switch CPSR_F flag from " 6165 "non-secure world with SCR.FW bit clear\n"); 6166 mask &= ~CPSR_F; 6167 } 6168 6169 /* Check whether non-maskable FIQ (NMFI) support is enabled. 6170 * If this bit is set software is not allowed to mask 6171 * FIQs, but is allowed to set CPSR_F to 0. 
6172 */ 6173 if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) && 6174 (val & CPSR_F)) { 6175 qemu_log_mask(LOG_GUEST_ERROR, 6176 "Ignoring attempt to enable CPSR_F flag " 6177 "(non-maskable FIQ [NMFI] support enabled)\n"); 6178 mask &= ~CPSR_F; 6179 } 6180 } 6181 } 6182 6183 env->daif &= ~(CPSR_AIF & mask); 6184 env->daif |= val & CPSR_AIF & mask; 6185 6186 if (write_type != CPSRWriteRaw && 6187 ((env->uncached_cpsr ^ val) & mask & CPSR_M)) { 6188 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) { 6189 /* Note that we can only get here in USR mode if this is a 6190 * gdb stub write; for this case we follow the architectural 6191 * behaviour for guest writes in USR mode of ignoring an attempt 6192 * to switch mode. (Those are caught by translate.c for writes 6193 * triggered by guest instructions.) 6194 */ 6195 mask &= ~CPSR_M; 6196 } else if (bad_mode_switch(env, val & CPSR_M, write_type)) { 6197 /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in 6198 * v7, and has defined behaviour in v8: 6199 * + leave CPSR.M untouched 6200 * + allow changes to the other CPSR fields 6201 * + set PSTATE.IL 6202 * For user changes via the GDB stub, we don't set PSTATE.IL, 6203 * as this would be unnecessarily harsh for a user error. 6204 */ 6205 mask &= ~CPSR_M; 6206 if (write_type != CPSRWriteByGDBStub && 6207 arm_feature(env, ARM_FEATURE_V8)) { 6208 mask |= CPSR_IL; 6209 val |= CPSR_IL; 6210 } 6211 } else { 6212 switch_mode(env, val & CPSR_M); 6213 } 6214 } 6215 mask &= ~CACHED_CPSR_BITS; 6216 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); 6217 } 6218 6219 /* Sign/zero extend */ 6220 uint32_t HELPER(sxtb16)(uint32_t x) 6221 { 6222 uint32_t res; 6223 res = (uint16_t)(int8_t)x; 6224 res |= (uint32_t)(int8_t)(x >> 16) << 16; 6225 return res; 6226 } 6227 6228 uint32_t HELPER(uxtb16)(uint32_t x) 6229 { 6230 uint32_t res; 6231 res = (uint16_t)(uint8_t)x; 6232 res |= (uint32_t)(uint8_t)(x >> 16) << 16; 6233 return res; 6234 } 6235 6236 int32_t HELPER(sdiv)(int32_t num, int32_t den) 6237 { 6238 if (den == 0) 6239 return 0; 6240 if (num == INT_MIN && den == -1) 6241 return INT_MIN; 6242 return num / den; 6243 } 6244 6245 uint32_t HELPER(udiv)(uint32_t num, uint32_t den) 6246 { 6247 if (den == 0) 6248 return 0; 6249 return num / den; 6250 } 6251 6252 uint32_t HELPER(rbit)(uint32_t x) 6253 { 6254 return revbit32(x); 6255 } 6256 6257 #if defined(CONFIG_USER_ONLY) 6258 6259 /* These should probably raise undefined insn exceptions. */ 6260 void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val) 6261 { 6262 ARMCPU *cpu = arm_env_get_cpu(env); 6263 6264 cpu_abort(CPU(cpu), "v7m_msr %d\n", reg); 6265 } 6266 6267 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg) 6268 { 6269 ARMCPU *cpu = arm_env_get_cpu(env); 6270 6271 cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg); 6272 return 0; 6273 } 6274 6275 void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest) 6276 { 6277 /* translate.c should never generate calls here in user-only mode */ 6278 g_assert_not_reached(); 6279 } 6280 6281 void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest) 6282 { 6283 /* translate.c should never generate calls here in user-only mode */ 6284 g_assert_not_reached(); 6285 } 6286 6287 uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) 6288 { 6289 /* The TT instructions can be used by unprivileged code, but in 6290 * user-only emulation we don't have the MPU. 
     * Luckily since we know we are NonSecure unprivileged (and that in
     * turn means that the A flag wasn't specified), all the bits in the
     * register must be zero:
     * IREGION: 0 because IRVALID is 0
     * IRVALID: 0 because NS
     * S: 0 because NS
     * NSRW: 0 because NS
     * NSR: 0 because NS
     * RW: 0 because unpriv and A flag not set
     * R: 0 because unpriv and A flag not set
     * SRVALID: 0 because NS
     * MRVALID: 0 because unpriv and A flag not set
     * SREGION: 0 because SRVALID is 0
     * MREGION: 0 because MRVALID is 0
     */
    return 0;
}

void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}

void aarch64_sync_64_to_32(CPUARMState *env)
{
    g_assert_not_reached();
}

#else

void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}

/* Physical Interrupt Target EL Lookup Table
 *
 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
 *
 * The below multi-dimensional table is used for looking up the target
 * exception level given numerous condition criteria. Specifically, the
 * target EL is based on SCR and HCR routing controls as well as the
 * currently executing EL and secure state.
 *
 * Dimensions:
 * target_el_table[2][2][2][2][2][4]
 *                  |  |  |  |  |  +--- Current EL
 *                  |  |  |  |  +------ Non-secure(0)/Secure(1)
 *                  |  |  |  +--------- HCR mask override
 *                  |  |  +------------ SCR exec state control
 *                  |  +--------------- SCR mask override
 *                  +------------------ 32-bit(0)/64-bit(1) EL3
 *
 * The table values are as such:
 * 0-3 = EL0-EL3
 * -1 = Cannot occur
 *
 * The ARM ARM target EL table includes entries indicating that an "exception
 * is not taken". The two cases where this is applicable are:
 * 1) An exception is taken from EL3 but the SCR does not have the exception
 * routed to EL3.
 * 2) An exception is taken from EL2 but the HCR does not have the exception
 * routed to EL2.
 * In these two cases, the below table contains a target of EL1. This value is
 * returned as it is expected that the consumer of the table data will check
 * for "target EL >= current EL" to ensure the exception is not taken.
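 *
 * As an illustrative walk-through: a physical IRQ taken from Non-secure
 * EL0 on a CPU with AArch64 EL3, SCR_EL3.IRQ = 0, SCR_EL3.RW = 1 and
 * HCR_EL2.{IMO,TGE} = 0 indexes the row commented "1 0 1 0" below and
 * yields a target of EL1.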
6390 * 6391 * SCR HCR 6392 * 64 EA AMO From 6393 * BIT IRQ IMO Non-secure Secure 6394 * EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3 6395 */ 6396 static const int8_t target_el_table[2][2][2][2][2][4] = { 6397 {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, 6398 {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},}, 6399 {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, 6400 {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},}, 6401 {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, 6402 {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},}, 6403 {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, 6404 {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},}, 6405 {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },}, 6406 {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},}, 6407 {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, -1, 1 },}, 6408 {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},}, 6409 {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, 6410 {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},}, 6411 {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, 6412 {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},},}, 6413 }; 6414 6415 /* 6416 * Determine the target EL for physical exceptions 6417 */ 6418 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, 6419 uint32_t cur_el, bool secure) 6420 { 6421 CPUARMState *env = cs->env_ptr; 6422 int rw; 6423 int scr; 6424 int hcr; 6425 int target_el; 6426 /* Is the highest EL AArch64? */ 6427 int is64 = arm_feature(env, ARM_FEATURE_AARCH64); 6428 6429 if (arm_feature(env, ARM_FEATURE_EL3)) { 6430 rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW); 6431 } else { 6432 /* Either EL2 is the highest EL (and so the EL2 register width 6433 * is given by is64); or there is no EL2 or EL3, in which case 6434 * the value of 'rw' does not affect the table lookup anyway. 
6435 */ 6436 rw = is64; 6437 } 6438 6439 switch (excp_idx) { 6440 case EXCP_IRQ: 6441 scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ); 6442 hcr = arm_hcr_el2_imo(env); 6443 break; 6444 case EXCP_FIQ: 6445 scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ); 6446 hcr = arm_hcr_el2_fmo(env); 6447 break; 6448 default: 6449 scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA); 6450 hcr = arm_hcr_el2_amo(env); 6451 break; 6452 }; 6453 6454 /* If HCR.TGE is set then HCR is treated as being 1 */ 6455 hcr |= ((env->cp15.hcr_el2 & HCR_TGE) == HCR_TGE); 6456 6457 /* Perform a table-lookup for the target EL given the current state */ 6458 target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el]; 6459 6460 assert(target_el > 0); 6461 6462 return target_el; 6463 } 6464 6465 static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value, 6466 ARMMMUIdx mmu_idx, bool ignfault) 6467 { 6468 CPUState *cs = CPU(cpu); 6469 CPUARMState *env = &cpu->env; 6470 MemTxAttrs attrs = {}; 6471 MemTxResult txres; 6472 target_ulong page_size; 6473 hwaddr physaddr; 6474 int prot; 6475 ARMMMUFaultInfo fi = {}; 6476 bool secure = mmu_idx & ARM_MMU_IDX_M_S; 6477 int exc; 6478 bool exc_secure; 6479 6480 if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr, 6481 &attrs, &prot, &page_size, &fi, NULL)) { 6482 /* MPU/SAU lookup failed */ 6483 if (fi.type == ARMFault_QEMU_SFault) { 6484 qemu_log_mask(CPU_LOG_INT, 6485 "...SecureFault with SFSR.AUVIOL during stacking\n"); 6486 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK; 6487 env->v7m.sfar = addr; 6488 exc = ARMV7M_EXCP_SECURE; 6489 exc_secure = false; 6490 } else { 6491 qemu_log_mask(CPU_LOG_INT, "...MemManageFault with CFSR.MSTKERR\n"); 6492 env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK; 6493 exc = ARMV7M_EXCP_MEM; 6494 exc_secure = secure; 6495 } 6496 goto pend_fault; 6497 } 6498 address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value, 6499 attrs, &txres); 6500 if (txres != MEMTX_OK) { 6501 /* BusFault trying to write the data */ 6502 qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n"); 6503 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK; 6504 exc = ARMV7M_EXCP_BUS; 6505 exc_secure = false; 6506 goto pend_fault; 6507 } 6508 return true; 6509 6510 pend_fault: 6511 /* By pending the exception at this point we are making 6512 * the IMPDEF choice "overridden exceptions pended" (see the 6513 * MergeExcInfo() pseudocode). The other choice would be to not 6514 * pend them now and then make a choice about which to throw away 6515 * later if we have two derived exceptions. 6516 * The only case when we must not pend the exception but instead 6517 * throw it away is if we are doing the push of the callee registers 6518 * and we've already generated a derived exception. Even in this 6519 * case we will still update the fault status registers. 
6520 */ 6521 if (!ignfault) { 6522 armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure); 6523 } 6524 return false; 6525 } 6526 6527 static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr, 6528 ARMMMUIdx mmu_idx) 6529 { 6530 CPUState *cs = CPU(cpu); 6531 CPUARMState *env = &cpu->env; 6532 MemTxAttrs attrs = {}; 6533 MemTxResult txres; 6534 target_ulong page_size; 6535 hwaddr physaddr; 6536 int prot; 6537 ARMMMUFaultInfo fi = {}; 6538 bool secure = mmu_idx & ARM_MMU_IDX_M_S; 6539 int exc; 6540 bool exc_secure; 6541 uint32_t value; 6542 6543 if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr, 6544 &attrs, &prot, &page_size, &fi, NULL)) { 6545 /* MPU/SAU lookup failed */ 6546 if (fi.type == ARMFault_QEMU_SFault) { 6547 qemu_log_mask(CPU_LOG_INT, 6548 "...SecureFault with SFSR.AUVIOL during unstack\n"); 6549 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK; 6550 env->v7m.sfar = addr; 6551 exc = ARMV7M_EXCP_SECURE; 6552 exc_secure = false; 6553 } else { 6554 qemu_log_mask(CPU_LOG_INT, 6555 "...MemManageFault with CFSR.MUNSTKERR\n"); 6556 env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK; 6557 exc = ARMV7M_EXCP_MEM; 6558 exc_secure = secure; 6559 } 6560 goto pend_fault; 6561 } 6562 6563 value = address_space_ldl(arm_addressspace(cs, attrs), physaddr, 6564 attrs, &txres); 6565 if (txres != MEMTX_OK) { 6566 /* BusFault trying to read the data */ 6567 qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n"); 6568 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK; 6569 exc = ARMV7M_EXCP_BUS; 6570 exc_secure = false; 6571 goto pend_fault; 6572 } 6573 6574 *dest = value; 6575 return true; 6576 6577 pend_fault: 6578 /* By pending the exception at this point we are making 6579 * the IMPDEF choice "overridden exceptions pended" (see the 6580 * MergeExcInfo() pseudocode). The other choice would be to not 6581 * pend them now and then make a choice about which to throw away 6582 * later if we have two derived exceptions. 6583 */ 6584 armv7m_nvic_set_pending(env->nvic, exc, exc_secure); 6585 return false; 6586 } 6587 6588 /* Write to v7M CONTROL.SPSEL bit for the specified security bank. 6589 * This may change the current stack pointer between Main and Process 6590 * stack pointers if it is done for the CONTROL register for the current 6591 * security state. 6592 */ 6593 static void write_v7m_control_spsel_for_secstate(CPUARMState *env, 6594 bool new_spsel, 6595 bool secstate) 6596 { 6597 bool old_is_psp = v7m_using_psp(env); 6598 6599 env->v7m.control[secstate] = 6600 deposit32(env->v7m.control[secstate], 6601 R_V7M_CONTROL_SPSEL_SHIFT, 6602 R_V7M_CONTROL_SPSEL_LENGTH, new_spsel); 6603 6604 if (secstate == env->v7m.secure) { 6605 bool new_is_psp = v7m_using_psp(env); 6606 uint32_t tmp; 6607 6608 if (old_is_psp != new_is_psp) { 6609 tmp = env->v7m.other_sp; 6610 env->v7m.other_sp = env->regs[13]; 6611 env->regs[13] = tmp; 6612 } 6613 } 6614 } 6615 6616 /* Write to v7M CONTROL.SPSEL bit. This may change the current 6617 * stack pointer between Main and Process stack pointers. 6618 */ 6619 static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel) 6620 { 6621 write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure); 6622 } 6623 6624 void write_v7m_exception(CPUARMState *env, uint32_t new_exc) 6625 { 6626 /* Write a new value to v7m.exception, thus transitioning into or out 6627 * of Handler mode; this may result in a change of active stack pointer. 
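     * (For example: moving from Thread mode with CONTROL.SPSEL set, where
     * the Process stack is in use, into Handler mode, which always uses the
     * Main stack, swaps regs[13] with v7m.other_sp below.)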
6628 */ 6629 bool new_is_psp, old_is_psp = v7m_using_psp(env); 6630 uint32_t tmp; 6631 6632 env->v7m.exception = new_exc; 6633 6634 new_is_psp = v7m_using_psp(env); 6635 6636 if (old_is_psp != new_is_psp) { 6637 tmp = env->v7m.other_sp; 6638 env->v7m.other_sp = env->regs[13]; 6639 env->regs[13] = tmp; 6640 } 6641 } 6642 6643 /* Switch M profile security state between NS and S */ 6644 static void switch_v7m_security_state(CPUARMState *env, bool new_secstate) 6645 { 6646 uint32_t new_ss_msp, new_ss_psp; 6647 6648 if (env->v7m.secure == new_secstate) { 6649 return; 6650 } 6651 6652 /* All the banked state is accessed by looking at env->v7m.secure 6653 * except for the stack pointer; rearrange the SP appropriately. 6654 */ 6655 new_ss_msp = env->v7m.other_ss_msp; 6656 new_ss_psp = env->v7m.other_ss_psp; 6657 6658 if (v7m_using_psp(env)) { 6659 env->v7m.other_ss_psp = env->regs[13]; 6660 env->v7m.other_ss_msp = env->v7m.other_sp; 6661 } else { 6662 env->v7m.other_ss_msp = env->regs[13]; 6663 env->v7m.other_ss_psp = env->v7m.other_sp; 6664 } 6665 6666 env->v7m.secure = new_secstate; 6667 6668 if (v7m_using_psp(env)) { 6669 env->regs[13] = new_ss_psp; 6670 env->v7m.other_sp = new_ss_msp; 6671 } else { 6672 env->regs[13] = new_ss_msp; 6673 env->v7m.other_sp = new_ss_psp; 6674 } 6675 } 6676 6677 void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest) 6678 { 6679 /* Handle v7M BXNS: 6680 * - if the return value is a magic value, do exception return (like BX) 6681 * - otherwise bit 0 of the return value is the target security state 6682 */ 6683 uint32_t min_magic; 6684 6685 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 6686 /* Covers FNC_RETURN and EXC_RETURN magic */ 6687 min_magic = FNC_RETURN_MIN_MAGIC; 6688 } else { 6689 /* EXC_RETURN magic only */ 6690 min_magic = EXC_RETURN_MIN_MAGIC; 6691 } 6692 6693 if (dest >= min_magic) { 6694 /* This is an exception return magic value; put it where 6695 * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT. 6696 * Note that if we ever add gen_ss_advance() singlestep support to 6697 * M profile this should count as an "instruction execution complete" 6698 * event (compare gen_bx_excret_final_code()). 6699 */ 6700 env->regs[15] = dest & ~1; 6701 env->thumb = dest & 1; 6702 HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT); 6703 /* notreached */ 6704 } 6705 6706 /* translate.c should have made BXNS UNDEF unless we're secure */ 6707 assert(env->v7m.secure); 6708 6709 switch_v7m_security_state(env, dest & 1); 6710 env->thumb = 1; 6711 env->regs[15] = dest & ~1; 6712 } 6713 6714 void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest) 6715 { 6716 /* Handle v7M BLXNS: 6717 * - bit 0 of the destination address is the target security state 6718 */ 6719 6720 /* At this point regs[15] is the address just after the BLXNS */ 6721 uint32_t nextinst = env->regs[15] | 1; 6722 uint32_t sp = env->regs[13] - 8; 6723 uint32_t saved_psr; 6724 6725 /* translate.c will have made BLXNS UNDEF unless we're secure */ 6726 assert(env->v7m.secure); 6727 6728 if (dest & 1) { 6729 /* target is Secure, so this is just a normal BLX, 6730 * except that the low bit doesn't indicate Thumb/not. 
6731 */ 6732 env->regs[14] = nextinst; 6733 env->thumb = 1; 6734 env->regs[15] = dest & ~1; 6735 return; 6736 } 6737 6738 /* Target is non-secure: first push a stack frame */ 6739 if (!QEMU_IS_ALIGNED(sp, 8)) { 6740 qemu_log_mask(LOG_GUEST_ERROR, 6741 "BLXNS with misaligned SP is UNPREDICTABLE\n"); 6742 } 6743 6744 if (sp < v7m_sp_limit(env)) { 6745 raise_exception(env, EXCP_STKOF, 0, 1); 6746 } 6747 6748 saved_psr = env->v7m.exception; 6749 if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) { 6750 saved_psr |= XPSR_SFPA; 6751 } 6752 6753 /* Note that these stores can throw exceptions on MPU faults */ 6754 cpu_stl_data(env, sp, nextinst); 6755 cpu_stl_data(env, sp + 4, saved_psr); 6756 6757 env->regs[13] = sp; 6758 env->regs[14] = 0xfeffffff; 6759 if (arm_v7m_is_handler_mode(env)) { 6760 /* Write a dummy value to IPSR, to avoid leaking the current secure 6761 * exception number to non-secure code. This is guaranteed not 6762 * to cause write_v7m_exception() to actually change stacks. 6763 */ 6764 write_v7m_exception(env, 1); 6765 } 6766 switch_v7m_security_state(env, 0); 6767 env->thumb = 1; 6768 env->regs[15] = dest; 6769 } 6770 6771 static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode, 6772 bool spsel) 6773 { 6774 /* Return a pointer to the location where we currently store the 6775 * stack pointer for the requested security state and thread mode. 6776 * This pointer will become invalid if the CPU state is updated 6777 * such that the stack pointers are switched around (eg changing 6778 * the SPSEL control bit). 6779 * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode(). 6780 * Unlike that pseudocode, we require the caller to pass us in the 6781 * SPSEL control bit value; this is because we also use this 6782 * function in handling of pushing of the callee-saves registers 6783 * part of the v8M stack frame (pseudocode PushCalleeStack()), 6784 * and in the tailchain codepath the SPSEL bit comes from the exception 6785 * return magic LR value from the previous exception. The pseudocode 6786 * opencodes the stack-selection in PushCalleeStack(), but we prefer 6787 * to make this utility function generic enough to do the job. 6788 */ 6789 bool want_psp = threadmode && spsel; 6790 6791 if (secure == env->v7m.secure) { 6792 if (want_psp == v7m_using_psp(env)) { 6793 return &env->regs[13]; 6794 } else { 6795 return &env->v7m.other_sp; 6796 } 6797 } else { 6798 if (want_psp) { 6799 return &env->v7m.other_ss_psp; 6800 } else { 6801 return &env->v7m.other_ss_msp; 6802 } 6803 } 6804 } 6805 6806 static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure, 6807 uint32_t *pvec) 6808 { 6809 CPUState *cs = CPU(cpu); 6810 CPUARMState *env = &cpu->env; 6811 MemTxResult result; 6812 uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4; 6813 uint32_t vector_entry; 6814 MemTxAttrs attrs = {}; 6815 ARMMMUIdx mmu_idx; 6816 bool exc_secure; 6817 6818 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true); 6819 6820 /* We don't do a get_phys_addr() here because the rules for vector 6821 * loads are special: they always use the default memory map, and 6822 * the default memory map permits reads from all addresses. 6823 * Since there's no easy way to pass through to pmsav8_mpu_lookup() 6824 * that we want this special case which would always say "yes", 6825 * we just do the SAU lookup here followed by a direct physical load. 
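     * (Illustrative example: for exception number 3 (HardFault) targeting
     * the Secure state, the fetch address below is
     * env->v7m.vecbase[M_REG_S] + 3 * 4.)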
6826 */ 6827 attrs.secure = targets_secure; 6828 attrs.user = false; 6829 6830 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 6831 V8M_SAttributes sattrs = {}; 6832 6833 v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs); 6834 if (sattrs.ns) { 6835 attrs.secure = false; 6836 } else if (!targets_secure) { 6837 /* NS access to S memory */ 6838 goto load_fail; 6839 } 6840 } 6841 6842 vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr, 6843 attrs, &result); 6844 if (result != MEMTX_OK) { 6845 goto load_fail; 6846 } 6847 *pvec = vector_entry; 6848 return true; 6849 6850 load_fail: 6851 /* All vector table fetch fails are reported as HardFault, with 6852 * HFSR.VECTTBL and .FORCED set. (FORCED is set because 6853 * technically the underlying exception is a MemManage or BusFault 6854 * that is escalated to HardFault.) This is a terminal exception, 6855 * so we will either take the HardFault immediately or else enter 6856 * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()). 6857 */ 6858 exc_secure = targets_secure || 6859 !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK); 6860 env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK; 6861 armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure); 6862 return false; 6863 } 6864 6865 static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain, 6866 bool ignore_faults) 6867 { 6868 /* For v8M, push the callee-saves register part of the stack frame. 6869 * Compare the v8M pseudocode PushCalleeStack(). 6870 * In the tailchaining case this may not be the current stack. 6871 */ 6872 CPUARMState *env = &cpu->env; 6873 uint32_t *frame_sp_p; 6874 uint32_t frameptr; 6875 ARMMMUIdx mmu_idx; 6876 bool stacked_ok; 6877 uint32_t limit; 6878 bool want_psp; 6879 6880 if (dotailchain) { 6881 bool mode = lr & R_V7M_EXCRET_MODE_MASK; 6882 bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) || 6883 !mode; 6884 6885 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv); 6886 frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode, 6887 lr & R_V7M_EXCRET_SPSEL_MASK); 6888 want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK); 6889 if (want_psp) { 6890 limit = env->v7m.psplim[M_REG_S]; 6891 } else { 6892 limit = env->v7m.msplim[M_REG_S]; 6893 } 6894 } else { 6895 mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false)); 6896 frame_sp_p = &env->regs[13]; 6897 limit = v7m_sp_limit(env); 6898 } 6899 6900 frameptr = *frame_sp_p - 0x28; 6901 if (frameptr < limit) { 6902 /* 6903 * Stack limit failure: set SP to the limit value, and generate 6904 * STKOF UsageFault. Stack pushes below the limit must not be 6905 * performed. It is IMPDEF whether pushes above the limit are 6906 * performed; we choose not to. 6907 */ 6908 qemu_log_mask(CPU_LOG_INT, 6909 "...STKOF during callee-saves register stacking\n"); 6910 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK; 6911 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, 6912 env->v7m.secure); 6913 *frame_sp_p = limit; 6914 return true; 6915 } 6916 6917 /* Write as much of the stack frame as we can. A write failure may 6918 * cause us to pend a derived exception. 
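     * The frame written below, relative to the new frame pointer, is:
     * +0x00 integrity signature (0xfefa125b), +0x04 unwritten (reserved),
     * +0x08..+0x24 r4-r11; 0x28 bytes in total.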
6919 */ 6920 stacked_ok = 6921 v7m_stack_write(cpu, frameptr, 0xfefa125b, mmu_idx, ignore_faults) && 6922 v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, 6923 ignore_faults) && 6924 v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, 6925 ignore_faults) && 6926 v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, 6927 ignore_faults) && 6928 v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, 6929 ignore_faults) && 6930 v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, 6931 ignore_faults) && 6932 v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, 6933 ignore_faults) && 6934 v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, 6935 ignore_faults) && 6936 v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, 6937 ignore_faults); 6938 6939 /* Update SP regardless of whether any of the stack accesses failed. */ 6940 *frame_sp_p = frameptr; 6941 6942 return !stacked_ok; 6943 } 6944 6945 static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain, 6946 bool ignore_stackfaults) 6947 { 6948 /* Do the "take the exception" parts of exception entry, 6949 * but not the pushing of state to the stack. This is 6950 * similar to the pseudocode ExceptionTaken() function. 6951 */ 6952 CPUARMState *env = &cpu->env; 6953 uint32_t addr; 6954 bool targets_secure; 6955 int exc; 6956 bool push_failed = false; 6957 6958 armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure); 6959 qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n", 6960 targets_secure ? "secure" : "nonsecure", exc); 6961 6962 if (arm_feature(env, ARM_FEATURE_V8)) { 6963 if (arm_feature(env, ARM_FEATURE_M_SECURITY) && 6964 (lr & R_V7M_EXCRET_S_MASK)) { 6965 /* The background code (the owner of the registers in the 6966 * exception frame) is Secure. This means it may either already 6967 * have or now needs to push callee-saves registers. 6968 */ 6969 if (targets_secure) { 6970 if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) { 6971 /* We took an exception from Secure to NonSecure 6972 * (which means the callee-saved registers got stacked) 6973 * and are now tailchaining to a Secure exception. 6974 * Clear DCRS so eventual return from this Secure 6975 * exception unstacks the callee-saved registers. 6976 */ 6977 lr &= ~R_V7M_EXCRET_DCRS_MASK; 6978 } 6979 } else { 6980 /* We're going to a non-secure exception; push the 6981 * callee-saves registers to the stack now, if they're 6982 * not already saved. 6983 */ 6984 if (lr & R_V7M_EXCRET_DCRS_MASK && 6985 !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) { 6986 push_failed = v7m_push_callee_stack(cpu, lr, dotailchain, 6987 ignore_stackfaults); 6988 } 6989 lr |= R_V7M_EXCRET_DCRS_MASK; 6990 } 6991 } 6992 6993 lr &= ~R_V7M_EXCRET_ES_MASK; 6994 if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) { 6995 lr |= R_V7M_EXCRET_ES_MASK; 6996 } 6997 lr &= ~R_V7M_EXCRET_SPSEL_MASK; 6998 if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) { 6999 lr |= R_V7M_EXCRET_SPSEL_MASK; 7000 } 7001 7002 /* Clear registers if necessary to prevent non-secure exception 7003 * code being able to see register values from secure code. 7004 * Where register values become architecturally UNKNOWN we leave 7005 * them with their previous values. 7006 */ 7007 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 7008 if (!targets_secure) { 7009 /* Always clear the caller-saved registers (they have been 7010 * pushed to the stack earlier in v7m_push_stack()). 
7011 * Clear callee-saved registers if the background code is 7012 * Secure (in which case these regs were saved in 7013 * v7m_push_callee_stack()). 7014 */ 7015 int i; 7016 7017 for (i = 0; i < 13; i++) { 7018 /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */ 7019 if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) { 7020 env->regs[i] = 0; 7021 } 7022 } 7023 /* Clear EAPSR */ 7024 xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT); 7025 } 7026 } 7027 } 7028 7029 if (push_failed && !ignore_stackfaults) { 7030 /* Derived exception on callee-saves register stacking: 7031 * we might now want to take a different exception which 7032 * targets a different security state, so try again from the top. 7033 */ 7034 qemu_log_mask(CPU_LOG_INT, 7035 "...derived exception on callee-saves register stacking"); 7036 v7m_exception_taken(cpu, lr, true, true); 7037 return; 7038 } 7039 7040 if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) { 7041 /* Vector load failed: derived exception */ 7042 qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load"); 7043 v7m_exception_taken(cpu, lr, true, true); 7044 return; 7045 } 7046 7047 /* Now we've done everything that might cause a derived exception 7048 * we can go ahead and activate whichever exception we're going to 7049 * take (which might now be the derived exception). 7050 */ 7051 armv7m_nvic_acknowledge_irq(env->nvic); 7052 7053 /* Switch to target security state -- must do this before writing SPSEL */ 7054 switch_v7m_security_state(env, targets_secure); 7055 write_v7m_control_spsel(env, 0); 7056 arm_clear_exclusive(env); 7057 /* Clear IT bits */ 7058 env->condexec_bits = 0; 7059 env->regs[14] = lr; 7060 env->regs[15] = addr & 0xfffffffe; 7061 env->thumb = addr & 1; 7062 } 7063 7064 static bool v7m_push_stack(ARMCPU *cpu) 7065 { 7066 /* Do the "set up stack frame" part of exception entry, 7067 * similar to pseudocode PushStack(). 7068 * Return true if we generate a derived exception (and so 7069 * should ignore further stack faults trying to process 7070 * that derived exception.) 7071 */ 7072 bool stacked_ok; 7073 CPUARMState *env = &cpu->env; 7074 uint32_t xpsr = xpsr_read(env); 7075 uint32_t frameptr = env->regs[13]; 7076 ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false)); 7077 7078 /* Align stack pointer if the guest wants that */ 7079 if ((frameptr & 4) && 7080 (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) { 7081 frameptr -= 4; 7082 xpsr |= XPSR_SPREALIGN; 7083 } 7084 7085 frameptr -= 0x20; 7086 7087 if (arm_feature(env, ARM_FEATURE_V8)) { 7088 uint32_t limit = v7m_sp_limit(env); 7089 7090 if (frameptr < limit) { 7091 /* 7092 * Stack limit failure: set SP to the limit value, and generate 7093 * STKOF UsageFault. Stack pushes below the limit must not be 7094 * performed. It is IMPDEF whether pushes above the limit are 7095 * performed; we choose not to. 7096 */ 7097 qemu_log_mask(CPU_LOG_INT, 7098 "...STKOF during stacking\n"); 7099 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK; 7100 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, 7101 env->v7m.secure); 7102 env->regs[13] = limit; 7103 return true; 7104 } 7105 } 7106 7107 /* Write as much of the stack frame as we can. If we fail a stack 7108 * write this will result in a derived exception being pended 7109 * (which may be taken in preference to the one we started with 7110 * if it has higher priority). 
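     * The basic frame written below is, relative to the new SP:
     * +0x00 r0, +0x04 r1, +0x08 r2, +0x0c r3, +0x10 r12, +0x14 lr,
     * +0x18 return address, +0x1c xPSR; 0x20 bytes in total.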
     */
    stacked_ok =
        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 4, env->regs[1], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 8, env->regs[2], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 12, env->regs[3], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 16, env->regs[12], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 20, env->regs[14], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 24, env->regs[15], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, false);

    /* Update SP regardless of whether any of the stack accesses failed. */
    env->regs[13] = frameptr;

    return !stacked_ok;
}

static void do_v7m_exception_exit(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    uint32_t excret;
    uint32_t xpsr;
    bool ufault = false;
    bool sfault = false;
    bool return_to_sp_process;
    bool return_to_handler;
    bool rettobase = false;
    bool exc_secure = false;
    bool return_to_secure;

    /* If we're not in Handler mode then jumps to magic exception-exit
     * addresses don't have magic behaviour. However for the v8M
     * security extensions the magic secure-function-return has to
     * work in thread mode too, so to avoid doing an extra check in
     * the generated code we allow exception-exit magic to also cause the
     * internal exception and bring us here in thread mode. Correct code
     * will never try to do this (the following insn fetch will always
     * fault) so the overhead of having taken an unnecessary exception
     * doesn't matter.
     */
    if (!arm_v7m_is_handler_mode(env)) {
        return;
    }

    /* In the spec pseudocode ExceptionReturn() is called directly
     * from BXWritePC() and gets the full target PC value including
     * bit zero. In QEMU's implementation we treat it as a normal
     * jump-to-register (which is then caught later on), and so split
     * the target value up between env->regs[15] and env->thumb in
     * gen_bx(). Reconstitute it.
     */
    excret = env->regs[15];
    if (env->thumb) {
        excret |= 1;
    }

    qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
                  " previous exception %d\n",
                  excret, env->v7m.exception);

    if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
                      "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
                      excret);
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* EXC_RETURN.ES validation check (R_SMFL). We must do this before
         * we pick which FAULTMASK to clear.
         */
        if (!env->v7m.secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) ||
             !(excret & R_V7M_EXCRET_DCRS_MASK))) {
            sfault = 1;
            /* For all other purposes, treat ES as 0 (R_HXSR) */
            excret &= ~R_V7M_EXCRET_ES_MASK;
        }
        exc_secure = excret & R_V7M_EXCRET_ES_MASK;
    }

    if (env->v7m.exception != ARMV7M_EXCP_NMI) {
        /* Auto-clear FAULTMASK on return from other than NMI.
         * If the security extension is implemented then this only
         * happens if the raw execution priority is >= 0; the
         * value of the ES bit in the exception return value indicates
         * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
7197 */ 7198 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 7199 if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) { 7200 env->v7m.faultmask[exc_secure] = 0; 7201 } 7202 } else { 7203 env->v7m.faultmask[M_REG_NS] = 0; 7204 } 7205 } 7206 7207 switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception, 7208 exc_secure)) { 7209 case -1: 7210 /* attempt to exit an exception that isn't active */ 7211 ufault = true; 7212 break; 7213 case 0: 7214 /* still an irq active now */ 7215 break; 7216 case 1: 7217 /* we returned to base exception level, no nesting. 7218 * (In the pseudocode this is written using "NestedActivation != 1" 7219 * where we have 'rettobase == false'.) 7220 */ 7221 rettobase = true; 7222 break; 7223 default: 7224 g_assert_not_reached(); 7225 } 7226 7227 return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK); 7228 return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK; 7229 return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) && 7230 (excret & R_V7M_EXCRET_S_MASK); 7231 7232 if (arm_feature(env, ARM_FEATURE_V8)) { 7233 if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) { 7234 /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP); 7235 * we choose to take the UsageFault. 7236 */ 7237 if ((excret & R_V7M_EXCRET_S_MASK) || 7238 (excret & R_V7M_EXCRET_ES_MASK) || 7239 !(excret & R_V7M_EXCRET_DCRS_MASK)) { 7240 ufault = true; 7241 } 7242 } 7243 if (excret & R_V7M_EXCRET_RES0_MASK) { 7244 ufault = true; 7245 } 7246 } else { 7247 /* For v7M we only recognize certain combinations of the low bits */ 7248 switch (excret & 0xf) { 7249 case 1: /* Return to Handler */ 7250 break; 7251 case 13: /* Return to Thread using Process stack */ 7252 case 9: /* Return to Thread using Main stack */ 7253 /* We only need to check NONBASETHRDENA for v7M, because in 7254 * v8M this bit does not exist (it is RES1). 7255 */ 7256 if (!rettobase && 7257 !(env->v7m.ccr[env->v7m.secure] & 7258 R_V7M_CCR_NONBASETHRDENA_MASK)) { 7259 ufault = true; 7260 } 7261 break; 7262 default: 7263 ufault = true; 7264 } 7265 } 7266 7267 /* 7268 * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in 7269 * Handler mode (and will be until we write the new XPSR.Interrupt 7270 * field) this does not switch around the current stack pointer. 7271 * We must do this before we do any kind of tailchaining, including 7272 * for the derived exceptions on integrity check failures, or we will 7273 * give the guest an incorrect EXCRET.SPSEL value on exception entry. 7274 */ 7275 write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure); 7276 7277 if (sfault) { 7278 env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK; 7279 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 7280 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing " 7281 "stackframe: failed EXC_RETURN.ES validity check\n"); 7282 v7m_exception_taken(cpu, excret, true, false); 7283 return; 7284 } 7285 7286 if (ufault) { 7287 /* Bad exception return: instead of popping the exception 7288 * stack, directly take a usage fault on the current stack. 
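         * (For reference: on v7M the architecturally valid EXC_RETURN
         * values are 0xfffffff1 (return to Handler), 0xfffffff9 (Thread,
         * Main stack) and 0xfffffffd (Thread, Process stack); other
         * malformed values are rejected by the checks above.)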
7289 */ 7290 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; 7291 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); 7292 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing " 7293 "stackframe: failed exception return integrity check\n"); 7294 v7m_exception_taken(cpu, excret, true, false); 7295 return; 7296 } 7297 7298 /* 7299 * Tailchaining: if there is currently a pending exception that 7300 * is high enough priority to preempt execution at the level we're 7301 * about to return to, then just directly take that exception now, 7302 * avoiding an unstack-and-then-stack. Note that now we have 7303 * deactivated the previous exception by calling armv7m_nvic_complete_irq() 7304 * our current execution priority is already the execution priority we are 7305 * returning to -- none of the state we would unstack or set based on 7306 * the EXCRET value affects it. 7307 */ 7308 if (armv7m_nvic_can_take_pending_exception(env->nvic)) { 7309 qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n"); 7310 v7m_exception_taken(cpu, excret, true, false); 7311 return; 7312 } 7313 7314 switch_v7m_security_state(env, return_to_secure); 7315 7316 { 7317 /* The stack pointer we should be reading the exception frame from 7318 * depends on bits in the magic exception return type value (and 7319 * for v8M isn't necessarily the stack pointer we will eventually 7320 * end up resuming execution with). Get a pointer to the location 7321 * in the CPU state struct where the SP we need is currently being 7322 * stored; we will use and modify it in place. 7323 * We use this limited C variable scope so we don't accidentally 7324 * use 'frame_sp_p' after we do something that makes it invalid. 7325 */ 7326 uint32_t *frame_sp_p = get_v7m_sp_ptr(env, 7327 return_to_secure, 7328 !return_to_handler, 7329 return_to_sp_process); 7330 uint32_t frameptr = *frame_sp_p; 7331 bool pop_ok = true; 7332 ARMMMUIdx mmu_idx; 7333 bool return_to_priv = return_to_handler || 7334 !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK); 7335 7336 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure, 7337 return_to_priv); 7338 7339 if (!QEMU_IS_ALIGNED(frameptr, 8) && 7340 arm_feature(env, ARM_FEATURE_V8)) { 7341 qemu_log_mask(LOG_GUEST_ERROR, 7342 "M profile exception return with non-8-aligned SP " 7343 "for destination state is UNPREDICTABLE\n"); 7344 } 7345 7346 /* Do we need to pop callee-saved registers? 
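         * (They are present on the frame only if we are returning to the
         * Secure state and either EXCRET.ES or EXCRET.DCRS is clear, which
         * is what the condition below tests.)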
*/ 7347 if (return_to_secure && 7348 ((excret & R_V7M_EXCRET_ES_MASK) == 0 || 7349 (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) { 7350 uint32_t expected_sig = 0xfefa125b; 7351 uint32_t actual_sig; 7352 7353 pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx); 7354 7355 if (pop_ok && expected_sig != actual_sig) { 7356 /* Take a SecureFault on the current stack */ 7357 env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK; 7358 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 7359 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing " 7360 "stackframe: failed exception return integrity " 7361 "signature check\n"); 7362 v7m_exception_taken(cpu, excret, true, false); 7363 return; 7364 } 7365 7366 pop_ok = pop_ok && 7367 v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) && 7368 v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) && 7369 v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) && 7370 v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) && 7371 v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) && 7372 v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) && 7373 v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) && 7374 v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx); 7375 7376 frameptr += 0x28; 7377 } 7378 7379 /* Pop registers */ 7380 pop_ok = pop_ok && 7381 v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) && 7382 v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) && 7383 v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) && 7384 v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) && 7385 v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) && 7386 v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) && 7387 v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) && 7388 v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx); 7389 7390 if (!pop_ok) { 7391 /* v7m_stack_read() pended a fault, so take it (as a tail 7392 * chained exception on the same stack frame) 7393 */ 7394 qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n"); 7395 v7m_exception_taken(cpu, excret, true, false); 7396 return; 7397 } 7398 7399 /* Returning from an exception with a PC with bit 0 set is defined 7400 * behaviour on v8M (bit 0 is ignored), but for v7M it was specified 7401 * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore 7402 * the lsbit, and there are several RTOSes out there which incorrectly 7403 * assume the r15 in the stack frame should be a Thumb-style "lsbit 7404 * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but 7405 * complain about the badly behaved guest. 7406 */ 7407 if (env->regs[15] & 1) { 7408 env->regs[15] &= ~1U; 7409 if (!arm_feature(env, ARM_FEATURE_V8)) { 7410 qemu_log_mask(LOG_GUEST_ERROR, 7411 "M profile return from interrupt with misaligned " 7412 "PC is UNPREDICTABLE on v7M\n"); 7413 } 7414 } 7415 7416 if (arm_feature(env, ARM_FEATURE_V8)) { 7417 /* For v8M we have to check whether the xPSR exception field 7418 * matches the EXCRET value for return to handler/thread 7419 * before we commit to changing the SP and xPSR. 7420 */ 7421 bool will_be_handler = (xpsr & XPSR_EXCP) != 0; 7422 if (return_to_handler != will_be_handler) { 7423 /* Take an INVPC UsageFault on the current stack. 7424 * By this point we will have switched to the security state 7425 * for the background state, so this UsageFault will target 7426 * that state. 
7427 */ 7428 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, 7429 env->v7m.secure); 7430 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; 7431 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing " 7432 "stackframe: failed exception return integrity " 7433 "check\n"); 7434 v7m_exception_taken(cpu, excret, true, false); 7435 return; 7436 } 7437 } 7438 7439 /* Commit to consuming the stack frame */ 7440 frameptr += 0x20; 7441 /* Undo stack alignment (the SPREALIGN bit indicates that the original 7442 * pre-exception SP was not 8-aligned and we added a padding word to 7443 * align it, so we undo this by ORing in the bit that increases it 7444 * from the current 8-aligned value to the 8-unaligned value. (Adding 4 7445 * would work too but a logical OR is how the pseudocode specifies it.) 7446 */ 7447 if (xpsr & XPSR_SPREALIGN) { 7448 frameptr |= 4; 7449 } 7450 *frame_sp_p = frameptr; 7451 } 7452 /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */ 7453 xpsr_write(env, xpsr, ~XPSR_SPREALIGN); 7454 7455 /* The restored xPSR exception field will be zero if we're 7456 * resuming in Thread mode. If that doesn't match what the 7457 * exception return excret specified then this is a UsageFault. 7458 * v7M requires we make this check here; v8M did it earlier. 7459 */ 7460 if (return_to_handler != arm_v7m_is_handler_mode(env)) { 7461 /* Take an INVPC UsageFault by pushing the stack again; 7462 * we know we're v7M so this is never a Secure UsageFault. 7463 */ 7464 bool ignore_stackfaults; 7465 7466 assert(!arm_feature(env, ARM_FEATURE_V8)); 7467 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false); 7468 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; 7469 ignore_stackfaults = v7m_push_stack(cpu); 7470 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: " 7471 "failed exception return integrity check\n"); 7472 v7m_exception_taken(cpu, excret, false, ignore_stackfaults); 7473 return; 7474 } 7475 7476 /* Otherwise, we have a successful exception exit. */ 7477 arm_clear_exclusive(env); 7478 qemu_log_mask(CPU_LOG_INT, "...successful exception return\n"); 7479 } 7480 7481 static bool do_v7m_function_return(ARMCPU *cpu) 7482 { 7483 /* v8M security extensions magic function return. 7484 * We may either: 7485 * (1) throw an exception (longjump) 7486 * (2) return true if we successfully handled the function return 7487 * (3) return false if we failed a consistency check and have 7488 * pended a UsageFault that needs to be taken now 7489 * 7490 * At this point the magic return value is split between env->regs[15] 7491 * and env->thumb. We don't bother to reconstitute it because we don't 7492 * need it (all values are handled the same way). 7493 */ 7494 CPUARMState *env = &cpu->env; 7495 uint32_t newpc, newpsr, newpsr_exc; 7496 7497 qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n"); 7498 7499 { 7500 bool threadmode, spsel; 7501 TCGMemOpIdx oi; 7502 ARMMMUIdx mmu_idx; 7503 uint32_t *frame_sp_p; 7504 uint32_t frameptr; 7505 7506 /* Pull the return address and IPSR from the Secure stack */ 7507 threadmode = !arm_v7m_is_handler_mode(env); 7508 spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK; 7509 7510 frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel); 7511 frameptr = *frame_sp_p; 7512 7513 /* These loads may throw an exception (for MPU faults). We want to 7514 * do them as secure, so work out what MMU index that is. 
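         * (The two words popped below are the ones pushed by
         * HELPER(v7m_blxns): the partial return address at frameptr and the
         * saved IPSR/SFPA word at frameptr + 4.)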
7515 */ 7516 mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true); 7517 oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx)); 7518 newpc = helper_le_ldul_mmu(env, frameptr, oi, 0); 7519 newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0); 7520 7521 /* Consistency checks on new IPSR */ 7522 newpsr_exc = newpsr & XPSR_EXCP; 7523 if (!((env->v7m.exception == 0 && newpsr_exc == 0) || 7524 (env->v7m.exception == 1 && newpsr_exc != 0))) { 7525 /* Pend the fault and tell our caller to take it */ 7526 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; 7527 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, 7528 env->v7m.secure); 7529 qemu_log_mask(CPU_LOG_INT, 7530 "...taking INVPC UsageFault: " 7531 "IPSR consistency check failed\n"); 7532 return false; 7533 } 7534 7535 *frame_sp_p = frameptr + 8; 7536 } 7537 7538 /* This invalidates frame_sp_p */ 7539 switch_v7m_security_state(env, true); 7540 env->v7m.exception = newpsr_exc; 7541 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK; 7542 if (newpsr & XPSR_SFPA) { 7543 env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK; 7544 } 7545 xpsr_write(env, 0, XPSR_IT); 7546 env->thumb = newpc & 1; 7547 env->regs[15] = newpc & ~1; 7548 7549 qemu_log_mask(CPU_LOG_INT, "...function return successful\n"); 7550 return true; 7551 } 7552 7553 static void arm_log_exception(int idx) 7554 { 7555 if (qemu_loglevel_mask(CPU_LOG_INT)) { 7556 const char *exc = NULL; 7557 static const char * const excnames[] = { 7558 [EXCP_UDEF] = "Undefined Instruction", 7559 [EXCP_SWI] = "SVC", 7560 [EXCP_PREFETCH_ABORT] = "Prefetch Abort", 7561 [EXCP_DATA_ABORT] = "Data Abort", 7562 [EXCP_IRQ] = "IRQ", 7563 [EXCP_FIQ] = "FIQ", 7564 [EXCP_BKPT] = "Breakpoint", 7565 [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit", 7566 [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage", 7567 [EXCP_HVC] = "Hypervisor Call", 7568 [EXCP_HYP_TRAP] = "Hypervisor Trap", 7569 [EXCP_SMC] = "Secure Monitor Call", 7570 [EXCP_VIRQ] = "Virtual IRQ", 7571 [EXCP_VFIQ] = "Virtual FIQ", 7572 [EXCP_SEMIHOST] = "Semihosting call", 7573 [EXCP_NOCP] = "v7M NOCP UsageFault", 7574 [EXCP_INVSTATE] = "v7M INVSTATE UsageFault", 7575 [EXCP_STKOF] = "v8M STKOF UsageFault", 7576 }; 7577 7578 if (idx >= 0 && idx < ARRAY_SIZE(excnames)) { 7579 exc = excnames[idx]; 7580 } 7581 if (!exc) { 7582 exc = "unknown"; 7583 } 7584 qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc); 7585 } 7586 } 7587 7588 static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, 7589 uint32_t addr, uint16_t *insn) 7590 { 7591 /* Load a 16-bit portion of a v7M instruction, returning true on success, 7592 * or false on failure (in which case we will have pended the appropriate 7593 * exception). 7594 * We need to do the instruction fetch's MPU and SAU checks 7595 * like this because there is no MMU index that would allow 7596 * doing the load with a single function call. Instead we must 7597 * first check that the security attributes permit the load 7598 * and that they don't mismatch on the two halves of the instruction, 7599 * and then we do the load as a secure load (ie using the security 7600 * attributes of the address, not the CPU, as architecturally required). 
7601 */ 7602 CPUState *cs = CPU(cpu); 7603 CPUARMState *env = &cpu->env; 7604 V8M_SAttributes sattrs = {}; 7605 MemTxAttrs attrs = {}; 7606 ARMMMUFaultInfo fi = {}; 7607 MemTxResult txres; 7608 target_ulong page_size; 7609 hwaddr physaddr; 7610 int prot; 7611 7612 v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs); 7613 if (!sattrs.nsc || sattrs.ns) { 7614 /* This must be the second half of the insn, and it straddles a 7615 * region boundary with the second half not being S&NSC. 7616 */ 7617 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; 7618 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 7619 qemu_log_mask(CPU_LOG_INT, 7620 "...really SecureFault with SFSR.INVEP\n"); 7621 return false; 7622 } 7623 if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, 7624 &physaddr, &attrs, &prot, &page_size, &fi, NULL)) { 7625 /* the MPU lookup failed */ 7626 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK; 7627 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure); 7628 qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n"); 7629 return false; 7630 } 7631 *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr, 7632 attrs, &txres); 7633 if (txres != MEMTX_OK) { 7634 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK; 7635 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false); 7636 qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n"); 7637 return false; 7638 } 7639 return true; 7640 } 7641 7642 static bool v7m_handle_execute_nsc(ARMCPU *cpu) 7643 { 7644 /* Check whether this attempt to execute code in a Secure & NS-Callable 7645 * memory region is for an SG instruction; if so, then emulate the 7646 * effect of the SG instruction and return true. Otherwise pend 7647 * the correct kind of exception and return false. 7648 */ 7649 CPUARMState *env = &cpu->env; 7650 ARMMMUIdx mmu_idx; 7651 uint16_t insn; 7652 7653 /* We should never get here unless get_phys_addr_pmsav8() caused 7654 * an exception for NS executing in S&NSC memory. 7655 */ 7656 assert(!env->v7m.secure); 7657 assert(arm_feature(env, ARM_FEATURE_M_SECURITY)); 7658 7659 /* We want to do the MPU lookup as secure; work out what mmu_idx that is */ 7660 mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true); 7661 7662 if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) { 7663 return false; 7664 } 7665 7666 if (!env->thumb) { 7667 goto gen_invep; 7668 } 7669 7670 if (insn != 0xe97f) { 7671 /* Not an SG instruction first half (we choose the IMPDEF 7672 * early-SG-check option). 7673 */ 7674 goto gen_invep; 7675 } 7676 7677 if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) { 7678 return false; 7679 } 7680 7681 if (insn != 0xe97f) { 7682 /* Not an SG instruction second half (yes, both halves of the SG 7683 * insn have the same hex value) 7684 */ 7685 goto gen_invep; 7686 } 7687 7688 /* OK, we have confirmed that we really have an SG instruction. 7689 * We know we're NS in S memory so don't need to repeat those checks. 
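     * (For reference: both halfwords of the SG encoding are 0xe97f, i.e.
     * the full instruction is 0xe97fe97f, which is why the same comparison
     * is applied to each half above.)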
7690 */ 7691 qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32 7692 ", executing it\n", env->regs[15]); 7693 env->regs[14] &= ~1; 7694 switch_v7m_security_state(env, true); 7695 xpsr_write(env, 0, XPSR_IT); 7696 env->regs[15] += 4; 7697 return true; 7698 7699 gen_invep: 7700 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; 7701 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 7702 qemu_log_mask(CPU_LOG_INT, 7703 "...really SecureFault with SFSR.INVEP\n"); 7704 return false; 7705 } 7706 7707 void arm_v7m_cpu_do_interrupt(CPUState *cs) 7708 { 7709 ARMCPU *cpu = ARM_CPU(cs); 7710 CPUARMState *env = &cpu->env; 7711 uint32_t lr; 7712 bool ignore_stackfaults; 7713 7714 arm_log_exception(cs->exception_index); 7715 7716 /* For exceptions we just mark as pending on the NVIC, and let that 7717 handle it. */ 7718 switch (cs->exception_index) { 7719 case EXCP_UDEF: 7720 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); 7721 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK; 7722 break; 7723 case EXCP_NOCP: 7724 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); 7725 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK; 7726 break; 7727 case EXCP_INVSTATE: 7728 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); 7729 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK; 7730 break; 7731 case EXCP_STKOF: 7732 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); 7733 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK; 7734 break; 7735 case EXCP_SWI: 7736 /* The PC already points to the next instruction. */ 7737 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure); 7738 break; 7739 case EXCP_PREFETCH_ABORT: 7740 case EXCP_DATA_ABORT: 7741 /* Note that for M profile we don't have a guest facing FSR, but 7742 * the env->exception.fsr will be populated by the code that 7743 * raises the fault, in the A profile short-descriptor format. 7744 */ 7745 switch (env->exception.fsr & 0xf) { 7746 case M_FAKE_FSR_NSC_EXEC: 7747 /* Exception generated when we try to execute code at an address 7748 * which is marked as Secure & Non-Secure Callable and the CPU 7749 * is in the Non-Secure state. The only instruction which can 7750 * be executed like this is SG (and that only if both halves of 7751 * the SG instruction have the same security attributes.) 7752 * Everything else must generate an INVEP SecureFault, so we 7753 * emulate the SG instruction here. 7754 */ 7755 if (v7m_handle_execute_nsc(cpu)) { 7756 return; 7757 } 7758 break; 7759 case M_FAKE_FSR_SFAULT: 7760 /* Various flavours of SecureFault for attempts to execute or 7761 * access data in the wrong security state. 
7762 */ 7763 switch (cs->exception_index) { 7764 case EXCP_PREFETCH_ABORT: 7765 if (env->v7m.secure) { 7766 env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK; 7767 qemu_log_mask(CPU_LOG_INT, 7768 "...really SecureFault with SFSR.INVTRAN\n"); 7769 } else { 7770 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; 7771 qemu_log_mask(CPU_LOG_INT, 7772 "...really SecureFault with SFSR.INVEP\n"); 7773 } 7774 break; 7775 case EXCP_DATA_ABORT: 7776 /* This must be an NS access to S memory */ 7777 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK; 7778 qemu_log_mask(CPU_LOG_INT, 7779 "...really SecureFault with SFSR.AUVIOL\n"); 7780 break; 7781 } 7782 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 7783 break; 7784 case 0x8: /* External Abort */ 7785 switch (cs->exception_index) { 7786 case EXCP_PREFETCH_ABORT: 7787 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK; 7788 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n"); 7789 break; 7790 case EXCP_DATA_ABORT: 7791 env->v7m.cfsr[M_REG_NS] |= 7792 (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK); 7793 env->v7m.bfar = env->exception.vaddress; 7794 qemu_log_mask(CPU_LOG_INT, 7795 "...with CFSR.PRECISERR and BFAR 0x%x\n", 7796 env->v7m.bfar); 7797 break; 7798 } 7799 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false); 7800 break; 7801 default: 7802 /* All other FSR values are either MPU faults or "can't happen 7803 * for M profile" cases. 7804 */ 7805 switch (cs->exception_index) { 7806 case EXCP_PREFETCH_ABORT: 7807 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK; 7808 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n"); 7809 break; 7810 case EXCP_DATA_ABORT: 7811 env->v7m.cfsr[env->v7m.secure] |= 7812 (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK); 7813 env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress; 7814 qemu_log_mask(CPU_LOG_INT, 7815 "...with CFSR.DACCVIOL and MMFAR 0x%x\n", 7816 env->v7m.mmfar[env->v7m.secure]); 7817 break; 7818 } 7819 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, 7820 env->v7m.secure); 7821 break; 7822 } 7823 break; 7824 case EXCP_BKPT: 7825 if (semihosting_enabled()) { 7826 int nr; 7827 nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff; 7828 if (nr == 0xab) { 7829 env->regs[15] += 2; 7830 qemu_log_mask(CPU_LOG_INT, 7831 "...handling as semihosting call 0x%x\n", 7832 env->regs[0]); 7833 env->regs[0] = do_arm_semihosting(env); 7834 return; 7835 } 7836 } 7837 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false); 7838 break; 7839 case EXCP_IRQ: 7840 break; 7841 case EXCP_EXCEPTION_EXIT: 7842 if (env->regs[15] < EXC_RETURN_MIN_MAGIC) { 7843 /* Must be v8M security extension function return */ 7844 assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC); 7845 assert(arm_feature(env, ARM_FEATURE_M_SECURITY)); 7846 if (do_v7m_function_return(cpu)) { 7847 return; 7848 } 7849 } else { 7850 do_v7m_exception_exit(cpu); 7851 return; 7852 } 7853 break; 7854 default: 7855 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 7856 return; /* Never happens. Keep compiler happy. */ 7857 } 7858 7859 if (arm_feature(env, ARM_FEATURE_V8)) { 7860 lr = R_V7M_EXCRET_RES1_MASK | 7861 R_V7M_EXCRET_DCRS_MASK | 7862 R_V7M_EXCRET_FTYPE_MASK; 7863 /* The S bit indicates whether we should return to Secure 7864 * or NonSecure (ie our current state). 7865 * The ES bit indicates whether we're taking this exception 7866 * to Secure or NonSecure (ie our target state). We set it 7867 * later, in v7m_exception_taken(). 7868 * The SPSEL bit is also set in v7m_exception_taken() for v8M. 
7869 * This corresponds to the ARM ARM pseudocode for v8M setting 7870 * some LR bits in PushStack() and some in ExceptionTaken(); 7871 * the distinction matters for the tailchain cases where we 7872 * can take an exception without pushing the stack. 7873 */ 7874 if (env->v7m.secure) { 7875 lr |= R_V7M_EXCRET_S_MASK; 7876 } 7877 } else { 7878 lr = R_V7M_EXCRET_RES1_MASK | 7879 R_V7M_EXCRET_S_MASK | 7880 R_V7M_EXCRET_DCRS_MASK | 7881 R_V7M_EXCRET_FTYPE_MASK | 7882 R_V7M_EXCRET_ES_MASK; 7883 if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) { 7884 lr |= R_V7M_EXCRET_SPSEL_MASK; 7885 } 7886 } 7887 if (!arm_v7m_is_handler_mode(env)) { 7888 lr |= R_V7M_EXCRET_MODE_MASK; 7889 } 7890 7891 ignore_stackfaults = v7m_push_stack(cpu); 7892 v7m_exception_taken(cpu, lr, false, ignore_stackfaults); 7893 } 7894 7895 /* Function used to synchronize QEMU's AArch64 register set with AArch32 7896 * register set. This is necessary when switching between AArch32 and AArch64 7897 * execution state. 7898 */ 7899 void aarch64_sync_32_to_64(CPUARMState *env) 7900 { 7901 int i; 7902 uint32_t mode = env->uncached_cpsr & CPSR_M; 7903 7904 /* We can blanket copy R[0:7] to X[0:7] */ 7905 for (i = 0; i < 8; i++) { 7906 env->xregs[i] = env->regs[i]; 7907 } 7908 7909 /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12. 7910 * Otherwise, they come from the banked user regs. 7911 */ 7912 if (mode == ARM_CPU_MODE_FIQ) { 7913 for (i = 8; i < 13; i++) { 7914 env->xregs[i] = env->usr_regs[i - 8]; 7915 } 7916 } else { 7917 for (i = 8; i < 13; i++) { 7918 env->xregs[i] = env->regs[i]; 7919 } 7920 } 7921 7922 /* Registers x13-x23 are the various mode SP and FP registers. Registers 7923 * r13 and r14 are only copied if we are in that mode, otherwise we copy 7924 * from the mode banked register. 
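 * The mapping implemented below is, schematically:
 *     x13/x14  <- USR/SYS r13/r14    x15      <- HYP r13
 *     x16/x17  <- IRQ r14/r13        x18/x19  <- SVC r14/r13
 *     x20/x21  <- ABT r14/r13        x22/x23  <- UND r14/r13
 *     x24..x30 <- FIQ r8..r14
 * where the live env->regs[] value is used if the CPU is currently in
 * that mode and the banked copy otherwise (HYP shares the USR r14).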
7925 */ 7926 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 7927 env->xregs[13] = env->regs[13]; 7928 env->xregs[14] = env->regs[14]; 7929 } else { 7930 env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)]; 7931 /* HYP is an exception in that it is copied from r14 */ 7932 if (mode == ARM_CPU_MODE_HYP) { 7933 env->xregs[14] = env->regs[14]; 7934 } else { 7935 env->xregs[14] = env->banked_r14[bank_number(ARM_CPU_MODE_USR)]; 7936 } 7937 } 7938 7939 if (mode == ARM_CPU_MODE_HYP) { 7940 env->xregs[15] = env->regs[13]; 7941 } else { 7942 env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)]; 7943 } 7944 7945 if (mode == ARM_CPU_MODE_IRQ) { 7946 env->xregs[16] = env->regs[14]; 7947 env->xregs[17] = env->regs[13]; 7948 } else { 7949 env->xregs[16] = env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)]; 7950 env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)]; 7951 } 7952 7953 if (mode == ARM_CPU_MODE_SVC) { 7954 env->xregs[18] = env->regs[14]; 7955 env->xregs[19] = env->regs[13]; 7956 } else { 7957 env->xregs[18] = env->banked_r14[bank_number(ARM_CPU_MODE_SVC)]; 7958 env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)]; 7959 } 7960 7961 if (mode == ARM_CPU_MODE_ABT) { 7962 env->xregs[20] = env->regs[14]; 7963 env->xregs[21] = env->regs[13]; 7964 } else { 7965 env->xregs[20] = env->banked_r14[bank_number(ARM_CPU_MODE_ABT)]; 7966 env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)]; 7967 } 7968 7969 if (mode == ARM_CPU_MODE_UND) { 7970 env->xregs[22] = env->regs[14]; 7971 env->xregs[23] = env->regs[13]; 7972 } else { 7973 env->xregs[22] = env->banked_r14[bank_number(ARM_CPU_MODE_UND)]; 7974 env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)]; 7975 } 7976 7977 /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 7978 * mode, then we can copy from r8-r14. Otherwise, we copy from the 7979 * FIQ bank for r8-r14. 7980 */ 7981 if (mode == ARM_CPU_MODE_FIQ) { 7982 for (i = 24; i < 31; i++) { 7983 env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */ 7984 } 7985 } else { 7986 for (i = 24; i < 29; i++) { 7987 env->xregs[i] = env->fiq_regs[i - 24]; 7988 } 7989 env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)]; 7990 env->xregs[30] = env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)]; 7991 } 7992 7993 env->pc = env->regs[15]; 7994 } 7995 7996 /* Function used to synchronize QEMU's AArch32 register set with AArch64 7997 * register set. This is necessary when switching between AArch32 and AArch64 7998 * execution state. 7999 */ 8000 void aarch64_sync_64_to_32(CPUARMState *env) 8001 { 8002 int i; 8003 uint32_t mode = env->uncached_cpsr & CPSR_M; 8004 8005 /* We can blanket copy X[0:7] to R[0:7] */ 8006 for (i = 0; i < 8; i++) { 8007 env->regs[i] = env->xregs[i]; 8008 } 8009 8010 /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12. 8011 * Otherwise, we copy x8-x12 into the banked user regs. 8012 */ 8013 if (mode == ARM_CPU_MODE_FIQ) { 8014 for (i = 8; i < 13; i++) { 8015 env->usr_regs[i - 8] = env->xregs[i]; 8016 } 8017 } else { 8018 for (i = 8; i < 13; i++) { 8019 env->regs[i] = env->xregs[i]; 8020 } 8021 } 8022 8023 /* Registers r13 & r14 depend on the current mode. 8024 * If we are in a given mode, we copy the corresponding x registers to r13 8025 * and r14. Otherwise, we copy the x register to the banked r13 and r14 8026 * for the mode. 
8027 */ 8028 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 8029 env->regs[13] = env->xregs[13]; 8030 env->regs[14] = env->xregs[14]; 8031 } else { 8032 env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13]; 8033 8034 /* HYP is an exception in that it does not have its own banked r14 but 8035 * shares the USR r14 8036 */ 8037 if (mode == ARM_CPU_MODE_HYP) { 8038 env->regs[14] = env->xregs[14]; 8039 } else { 8040 env->banked_r14[bank_number(ARM_CPU_MODE_USR)] = env->xregs[14]; 8041 } 8042 } 8043 8044 if (mode == ARM_CPU_MODE_HYP) { 8045 env->regs[13] = env->xregs[15]; 8046 } else { 8047 env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15]; 8048 } 8049 8050 if (mode == ARM_CPU_MODE_IRQ) { 8051 env->regs[14] = env->xregs[16]; 8052 env->regs[13] = env->xregs[17]; 8053 } else { 8054 env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16]; 8055 env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17]; 8056 } 8057 8058 if (mode == ARM_CPU_MODE_SVC) { 8059 env->regs[14] = env->xregs[18]; 8060 env->regs[13] = env->xregs[19]; 8061 } else { 8062 env->banked_r14[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18]; 8063 env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19]; 8064 } 8065 8066 if (mode == ARM_CPU_MODE_ABT) { 8067 env->regs[14] = env->xregs[20]; 8068 env->regs[13] = env->xregs[21]; 8069 } else { 8070 env->banked_r14[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20]; 8071 env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21]; 8072 } 8073 8074 if (mode == ARM_CPU_MODE_UND) { 8075 env->regs[14] = env->xregs[22]; 8076 env->regs[13] = env->xregs[23]; 8077 } else { 8078 env->banked_r14[bank_number(ARM_CPU_MODE_UND)] = env->xregs[22]; 8079 env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23]; 8080 } 8081 8082 /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 8083 * mode, then we can copy to r8-r14. Otherwise, we copy to the 8084 * FIQ bank for r8-r14. 8085 */ 8086 if (mode == ARM_CPU_MODE_FIQ) { 8087 for (i = 24; i < 31; i++) { 8088 env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */ 8089 } 8090 } else { 8091 for (i = 24; i < 29; i++) { 8092 env->fiq_regs[i - 24] = env->xregs[i]; 8093 } 8094 env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29]; 8095 env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30]; 8096 } 8097 8098 env->regs[15] = env->pc; 8099 } 8100 8101 static void take_aarch32_exception(CPUARMState *env, int new_mode, 8102 uint32_t mask, uint32_t offset, 8103 uint32_t newpc) 8104 { 8105 /* Change the CPU state so as to actually take the exception. */ 8106 switch_mode(env, new_mode); 8107 /* 8108 * For exceptions taken to AArch32 we must clear the SS bit in both 8109 * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now. 8110 */ 8111 env->uncached_cpsr &= ~PSTATE_SS; 8112 env->spsr = cpsr_read(env); 8113 /* Clear IT bits. */ 8114 env->condexec_bits = 0; 8115 /* Switch to the new mode, and to the correct instruction set. 
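 * (Illustrative: an IRQ taken on a v7 core with SCTLR.EE set leaves this
 * function with CPSR.M = 0x12 (IRQ mode), CPSR.E = 1, the IT bits clear,
 * and CPSR.I masked via the caller-supplied mask.)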
 */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    /* J and IL must always be cleared for exception entry */
    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
    env->daif |= mask;

    if (new_mode == ARM_CPU_MODE_HYP) {
        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
        env->elr_el[2] = env->regs[15];
    } else {
        /*
         * this is a lie, as there was no c1_sys on V4T/V5, but who cares
         * and we should just guard the thumb mode on V4
         */
        if (arm_feature(env, ARM_FEATURE_V4T)) {
            env->thumb =
                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
        }
        env->regs[14] = env->regs[15] + offset;
    }
    env->regs[15] = newpc;
}

static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
{
    /*
     * Handle exception entry to Hyp mode; this is sufficiently
     * different to entry to other AArch32 modes that we handle it
     * separately here.
     *
     * The vector table entry used is always the 0x14 Hyp mode entry point,
     * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
     * The offset applied to the preferred return address is always zero
     * (see DDI0487C.a section G1.12.3).
     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
     */
    uint32_t addr, mask;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (cs->exception_index) {
    case EXCP_UDEF:
        addr = 0x04;
        break;
    case EXCP_SWI:
        addr = 0x14;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort. */
    case EXCP_PREFETCH_ABORT:
        env->cp15.ifar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x0c;
        break;
    case EXCP_DATA_ABORT:
        env->cp15.dfar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x10;
        break;
    case EXCP_IRQ:
        addr = 0x18;
        break;
    case EXCP_FIQ:
        addr = 0x1c;
        break;
    case EXCP_HVC:
        addr = 0x08;
        break;
    case EXCP_HYP_TRAP:
        addr = 0x14;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
        env->cp15.esr_el[2] = env->exception.syndrome;
    }

    if (arm_current_el(env) != 2 && addr < 0x14) {
        addr = 0x14;
    }

    mask = 0;
    if (!(env->cp15.scr_el3 & SCR_EA)) {
        mask |= CPSR_A;
    }
    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
        mask |= CPSR_I;
    }
    if (!(env->cp15.scr_el3 & SCR_FIQ)) {
        mask |= CPSR_F;
    }

    addr += env->cp15.hvbar;

    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
}

static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case
EC_WATCHPOINT_SAME_EL: 8238 moe = 10; 8239 break; 8240 case EC_AA32_BKPT: 8241 moe = 3; 8242 break; 8243 case EC_VECTORCATCH: 8244 moe = 5; 8245 break; 8246 default: 8247 moe = 0; 8248 break; 8249 } 8250 8251 if (moe) { 8252 env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe); 8253 } 8254 8255 if (env->exception.target_el == 2) { 8256 arm_cpu_do_interrupt_aarch32_hyp(cs); 8257 return; 8258 } 8259 8260 /* TODO: Vectored interrupt controller. */ 8261 switch (cs->exception_index) { 8262 case EXCP_UDEF: 8263 new_mode = ARM_CPU_MODE_UND; 8264 addr = 0x04; 8265 mask = CPSR_I; 8266 if (env->thumb) 8267 offset = 2; 8268 else 8269 offset = 4; 8270 break; 8271 case EXCP_SWI: 8272 new_mode = ARM_CPU_MODE_SVC; 8273 addr = 0x08; 8274 mask = CPSR_I; 8275 /* The PC already points to the next instruction. */ 8276 offset = 0; 8277 break; 8278 case EXCP_BKPT: 8279 /* Fall through to prefetch abort. */ 8280 case EXCP_PREFETCH_ABORT: 8281 A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr); 8282 A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress); 8283 qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n", 8284 env->exception.fsr, (uint32_t)env->exception.vaddress); 8285 new_mode = ARM_CPU_MODE_ABT; 8286 addr = 0x0c; 8287 mask = CPSR_A | CPSR_I; 8288 offset = 4; 8289 break; 8290 case EXCP_DATA_ABORT: 8291 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr); 8292 A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress); 8293 qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n", 8294 env->exception.fsr, 8295 (uint32_t)env->exception.vaddress); 8296 new_mode = ARM_CPU_MODE_ABT; 8297 addr = 0x10; 8298 mask = CPSR_A | CPSR_I; 8299 offset = 8; 8300 break; 8301 case EXCP_IRQ: 8302 new_mode = ARM_CPU_MODE_IRQ; 8303 addr = 0x18; 8304 /* Disable IRQ and imprecise data aborts. */ 8305 mask = CPSR_A | CPSR_I; 8306 offset = 4; 8307 if (env->cp15.scr_el3 & SCR_IRQ) { 8308 /* IRQ routed to monitor mode */ 8309 new_mode = ARM_CPU_MODE_MON; 8310 mask |= CPSR_F; 8311 } 8312 break; 8313 case EXCP_FIQ: 8314 new_mode = ARM_CPU_MODE_FIQ; 8315 addr = 0x1c; 8316 /* Disable FIQ, IRQ and imprecise data aborts. */ 8317 mask = CPSR_A | CPSR_I | CPSR_F; 8318 if (env->cp15.scr_el3 & SCR_FIQ) { 8319 /* FIQ routed to monitor mode */ 8320 new_mode = ARM_CPU_MODE_MON; 8321 } 8322 offset = 4; 8323 break; 8324 case EXCP_VIRQ: 8325 new_mode = ARM_CPU_MODE_IRQ; 8326 addr = 0x18; 8327 /* Disable IRQ and imprecise data aborts. */ 8328 mask = CPSR_A | CPSR_I; 8329 offset = 4; 8330 break; 8331 case EXCP_VFIQ: 8332 new_mode = ARM_CPU_MODE_FIQ; 8333 addr = 0x1c; 8334 /* Disable FIQ, IRQ and imprecise data aborts. */ 8335 mask = CPSR_A | CPSR_I | CPSR_F; 8336 offset = 4; 8337 break; 8338 case EXCP_SMC: 8339 new_mode = ARM_CPU_MODE_MON; 8340 addr = 0x08; 8341 mask = CPSR_A | CPSR_I | CPSR_F; 8342 offset = 0; 8343 break; 8344 default: 8345 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 8346 return; /* Never happens. Keep compiler happy. */ 8347 } 8348 8349 if (new_mode == ARM_CPU_MODE_MON) { 8350 addr += env->cp15.mvbar; 8351 } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) { 8352 /* High vectors. When enabled, base address cannot be remapped. */ 8353 addr += 0xffff0000; 8354 } else { 8355 /* ARM v7 architectures provide a vector base address register to remap 8356 * the interrupt vector table. 8357 * This register is only followed in non-monitor mode, and is banked. 8358 * Note: only bits 31:5 are valid. 
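 * (Worked example, illustrative: with VBAR = 0x40000000 an IRQ, whose
 * table offset is 0x18, vectors to 0x40000018; had SCTLR.V been set the
 * same IRQ would have vectored to 0xffff0018 instead.)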
8359 */ 8360 addr += A32_BANKED_CURRENT_REG_GET(env, vbar); 8361 } 8362 8363 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { 8364 env->cp15.scr_el3 &= ~SCR_NS; 8365 } 8366 8367 take_aarch32_exception(env, new_mode, mask, offset, addr); 8368 } 8369 8370 /* Handle exception entry to a target EL which is using AArch64 */ 8371 static void arm_cpu_do_interrupt_aarch64(CPUState *cs) 8372 { 8373 ARMCPU *cpu = ARM_CPU(cs); 8374 CPUARMState *env = &cpu->env; 8375 unsigned int new_el = env->exception.target_el; 8376 target_ulong addr = env->cp15.vbar_el[new_el]; 8377 unsigned int new_mode = aarch64_pstate_mode(new_el, true); 8378 unsigned int cur_el = arm_current_el(env); 8379 8380 /* 8381 * Note that new_el can never be 0. If cur_el is 0, then 8382 * el0_a64 is is_a64(), else el0_a64 is ignored. 8383 */ 8384 aarch64_sve_change_el(env, cur_el, new_el, is_a64(env)); 8385 8386 if (cur_el < new_el) { 8387 /* Entry vector offset depends on whether the implemented EL 8388 * immediately lower than the target level is using AArch32 or AArch64 8389 */ 8390 bool is_aa64; 8391 8392 switch (new_el) { 8393 case 3: 8394 is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0; 8395 break; 8396 case 2: 8397 is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0; 8398 break; 8399 case 1: 8400 is_aa64 = is_a64(env); 8401 break; 8402 default: 8403 g_assert_not_reached(); 8404 } 8405 8406 if (is_aa64) { 8407 addr += 0x400; 8408 } else { 8409 addr += 0x600; 8410 } 8411 } else if (pstate_read(env) & PSTATE_SP) { 8412 addr += 0x200; 8413 } 8414 8415 switch (cs->exception_index) { 8416 case EXCP_PREFETCH_ABORT: 8417 case EXCP_DATA_ABORT: 8418 env->cp15.far_el[new_el] = env->exception.vaddress; 8419 qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n", 8420 env->cp15.far_el[new_el]); 8421 /* fall through */ 8422 case EXCP_BKPT: 8423 case EXCP_UDEF: 8424 case EXCP_SWI: 8425 case EXCP_HVC: 8426 case EXCP_HYP_TRAP: 8427 case EXCP_SMC: 8428 env->cp15.esr_el[new_el] = env->exception.syndrome; 8429 break; 8430 case EXCP_IRQ: 8431 case EXCP_VIRQ: 8432 addr += 0x80; 8433 break; 8434 case EXCP_FIQ: 8435 case EXCP_VFIQ: 8436 addr += 0x100; 8437 break; 8438 case EXCP_SEMIHOST: 8439 qemu_log_mask(CPU_LOG_INT, 8440 "...handling as semihosting call 0x%" PRIx64 "\n", 8441 env->xregs[0]); 8442 env->xregs[0] = do_arm_semihosting(env); 8443 return; 8444 default: 8445 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 8446 } 8447 8448 if (is_a64(env)) { 8449 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env); 8450 aarch64_save_sp(env, arm_current_el(env)); 8451 env->elr_el[new_el] = env->pc; 8452 } else { 8453 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env); 8454 env->elr_el[new_el] = env->regs[15]; 8455 8456 aarch64_sync_32_to_64(env); 8457 8458 env->condexec_bits = 0; 8459 } 8460 qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n", 8461 env->elr_el[new_el]); 8462 8463 pstate_write(env, PSTATE_DAIF | new_mode); 8464 env->aarch64 = 1; 8465 aarch64_restore_sp(env, new_el); 8466 8467 env->pc = addr; 8468 8469 qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n", 8470 new_el, env->pc, pstate_read(env)); 8471 } 8472 8473 static inline bool check_for_semihosting(CPUState *cs) 8474 { 8475 /* Check whether this exception is a semihosting call; if so 8476 * then handle it and return true; otherwise return false. 
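 * (Illustrative: the encodings accepted below are the dedicated
 * EXCP_SEMIHOST exception for AArch64, SVC #0xab in Thumb state,
 * SVC #0x123456 in ARM state, and BKPT #0xab.)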
8477 */ 8478 ARMCPU *cpu = ARM_CPU(cs); 8479 CPUARMState *env = &cpu->env; 8480 8481 if (is_a64(env)) { 8482 if (cs->exception_index == EXCP_SEMIHOST) { 8483 /* This is always the 64-bit semihosting exception. 8484 * The "is this usermode" and "is semihosting enabled" 8485 * checks have been done at translate time. 8486 */ 8487 qemu_log_mask(CPU_LOG_INT, 8488 "...handling as semihosting call 0x%" PRIx64 "\n", 8489 env->xregs[0]); 8490 env->xregs[0] = do_arm_semihosting(env); 8491 return true; 8492 } 8493 return false; 8494 } else { 8495 uint32_t imm; 8496 8497 /* Only intercept calls from privileged modes, to provide some 8498 * semblance of security. 8499 */ 8500 if (cs->exception_index != EXCP_SEMIHOST && 8501 (!semihosting_enabled() || 8502 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) { 8503 return false; 8504 } 8505 8506 switch (cs->exception_index) { 8507 case EXCP_SEMIHOST: 8508 /* This is always a semihosting call; the "is this usermode" 8509 * and "is semihosting enabled" checks have been done at 8510 * translate time. 8511 */ 8512 break; 8513 case EXCP_SWI: 8514 /* Check for semihosting interrupt. */ 8515 if (env->thumb) { 8516 imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env)) 8517 & 0xff; 8518 if (imm == 0xab) { 8519 break; 8520 } 8521 } else { 8522 imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env)) 8523 & 0xffffff; 8524 if (imm == 0x123456) { 8525 break; 8526 } 8527 } 8528 return false; 8529 case EXCP_BKPT: 8530 /* See if this is a semihosting syscall. */ 8531 if (env->thumb) { 8532 imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) 8533 & 0xff; 8534 if (imm == 0xab) { 8535 env->regs[15] += 2; 8536 break; 8537 } 8538 } 8539 return false; 8540 default: 8541 return false; 8542 } 8543 8544 qemu_log_mask(CPU_LOG_INT, 8545 "...handling as semihosting call 0x%x\n", 8546 env->regs[0]); 8547 env->regs[0] = do_arm_semihosting(env); 8548 return true; 8549 } 8550 } 8551 8552 /* Handle a CPU exception for A and R profile CPUs. 8553 * Do any appropriate logging, handle PSCI calls, and then hand off 8554 * to the AArch64-entry or AArch32-entry function depending on the 8555 * target exception level's register width. 8556 */ 8557 void arm_cpu_do_interrupt(CPUState *cs) 8558 { 8559 ARMCPU *cpu = ARM_CPU(cs); 8560 CPUARMState *env = &cpu->env; 8561 unsigned int new_el = env->exception.target_el; 8562 8563 assert(!arm_feature(env, ARM_FEATURE_M)); 8564 8565 arm_log_exception(cs->exception_index); 8566 qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env), 8567 new_el); 8568 if (qemu_loglevel_mask(CPU_LOG_INT) 8569 && !excp_is_internal(cs->exception_index)) { 8570 qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n", 8571 env->exception.syndrome >> ARM_EL_EC_SHIFT, 8572 env->exception.syndrome); 8573 } 8574 8575 if (arm_is_psci_call(cpu, cs->exception_index)) { 8576 arm_handle_psci_call(cpu); 8577 qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n"); 8578 return; 8579 } 8580 8581 /* Semihosting semantics depend on the register width of the 8582 * code that caused the exception, not the target exception level, 8583 * so must be handled here. 8584 */ 8585 if (check_for_semihosting(cs)) { 8586 return; 8587 } 8588 8589 /* Hooks may change global state so BQL should be held, also the 8590 * BQL needs to be held for any modification of 8591 * cs->interrupt_request. 
8592 */ 8593 g_assert(qemu_mutex_iothread_locked()); 8594 8595 arm_call_pre_el_change_hook(cpu); 8596 8597 assert(!excp_is_internal(cs->exception_index)); 8598 if (arm_el_is_aa64(env, new_el)) { 8599 arm_cpu_do_interrupt_aarch64(cs); 8600 } else { 8601 arm_cpu_do_interrupt_aarch32(cs); 8602 } 8603 8604 arm_call_el_change_hook(cpu); 8605 8606 if (!kvm_enabled()) { 8607 cs->interrupt_request |= CPU_INTERRUPT_EXITTB; 8608 } 8609 } 8610 8611 /* Return the exception level which controls this address translation regime */ 8612 static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx) 8613 { 8614 switch (mmu_idx) { 8615 case ARMMMUIdx_S2NS: 8616 case ARMMMUIdx_S1E2: 8617 return 2; 8618 case ARMMMUIdx_S1E3: 8619 return 3; 8620 case ARMMMUIdx_S1SE0: 8621 return arm_el_is_aa64(env, 3) ? 1 : 3; 8622 case ARMMMUIdx_S1SE1: 8623 case ARMMMUIdx_S1NSE0: 8624 case ARMMMUIdx_S1NSE1: 8625 case ARMMMUIdx_MPrivNegPri: 8626 case ARMMMUIdx_MUserNegPri: 8627 case ARMMMUIdx_MPriv: 8628 case ARMMMUIdx_MUser: 8629 case ARMMMUIdx_MSPrivNegPri: 8630 case ARMMMUIdx_MSUserNegPri: 8631 case ARMMMUIdx_MSPriv: 8632 case ARMMMUIdx_MSUser: 8633 return 1; 8634 default: 8635 g_assert_not_reached(); 8636 } 8637 } 8638 8639 /* Return the SCTLR value which controls this address translation regime */ 8640 static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx) 8641 { 8642 return env->cp15.sctlr_el[regime_el(env, mmu_idx)]; 8643 } 8644 8645 /* Return true if the specified stage of address translation is disabled */ 8646 static inline bool regime_translation_disabled(CPUARMState *env, 8647 ARMMMUIdx mmu_idx) 8648 { 8649 if (arm_feature(env, ARM_FEATURE_M)) { 8650 switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] & 8651 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) { 8652 case R_V7M_MPU_CTRL_ENABLE_MASK: 8653 /* Enabled, but not for HardFault and NMI */ 8654 return mmu_idx & ARM_MMU_IDX_M_NEGPRI; 8655 case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK: 8656 /* Enabled for all cases */ 8657 return false; 8658 case 0: 8659 default: 8660 /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but 8661 * we warned about that in armv7m_nvic.c when the guest set it. 
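 * To spell out the decode above (illustrative):
 *     ENABLE=0              -> MPU disabled for all accesses (true);
 *     ENABLE=1, HFNMIENA=0  -> disabled only for HardFault, NMI and
 *                              other negative-priority handlers;
 *     ENABLE=1, HFNMIENA=1  -> enabled for everything (false).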
8662 */ 8663 return true; 8664 } 8665 } 8666 8667 if (mmu_idx == ARMMMUIdx_S2NS) { 8668 return (env->cp15.hcr_el2 & HCR_VM) == 0; 8669 } 8670 8671 if (env->cp15.hcr_el2 & HCR_TGE) { 8672 /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */ 8673 if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) { 8674 return true; 8675 } 8676 } 8677 8678 return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0; 8679 } 8680 8681 static inline bool regime_translation_big_endian(CPUARMState *env, 8682 ARMMMUIdx mmu_idx) 8683 { 8684 return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0; 8685 } 8686 8687 /* Return the TCR controlling this translation regime */ 8688 static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx) 8689 { 8690 if (mmu_idx == ARMMMUIdx_S2NS) { 8691 return &env->cp15.vtcr_el2; 8692 } 8693 return &env->cp15.tcr_el[regime_el(env, mmu_idx)]; 8694 } 8695 8696 /* Convert a possible stage1+2 MMU index into the appropriate 8697 * stage 1 MMU index 8698 */ 8699 static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx) 8700 { 8701 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) { 8702 mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0); 8703 } 8704 return mmu_idx; 8705 } 8706 8707 /* Returns TBI0 value for current regime el */ 8708 uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx) 8709 { 8710 TCR *tcr; 8711 uint32_t el; 8712 8713 /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert 8714 * a stage 1+2 mmu index into the appropriate stage 1 mmu index. 8715 */ 8716 mmu_idx = stage_1_mmu_idx(mmu_idx); 8717 8718 tcr = regime_tcr(env, mmu_idx); 8719 el = regime_el(env, mmu_idx); 8720 8721 if (el > 1) { 8722 return extract64(tcr->raw_tcr, 20, 1); 8723 } else { 8724 return extract64(tcr->raw_tcr, 37, 1); 8725 } 8726 } 8727 8728 /* Returns TBI1 value for current regime el */ 8729 uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx) 8730 { 8731 TCR *tcr; 8732 uint32_t el; 8733 8734 /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert 8735 * a stage 1+2 mmu index into the appropriate stage 1 mmu index. 8736 */ 8737 mmu_idx = stage_1_mmu_idx(mmu_idx); 8738 8739 tcr = regime_tcr(env, mmu_idx); 8740 el = regime_el(env, mmu_idx); 8741 8742 if (el > 1) { 8743 return 0; 8744 } else { 8745 return extract64(tcr->raw_tcr, 38, 1); 8746 } 8747 } 8748 8749 /* Return the TTBR associated with this translation regime */ 8750 static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, 8751 int ttbrn) 8752 { 8753 if (mmu_idx == ARMMMUIdx_S2NS) { 8754 return env->cp15.vttbr_el2; 8755 } 8756 if (ttbrn == 0) { 8757 return env->cp15.ttbr0_el[regime_el(env, mmu_idx)]; 8758 } else { 8759 return env->cp15.ttbr1_el[regime_el(env, mmu_idx)]; 8760 } 8761 } 8762 8763 /* Return true if the translation regime is using LPAE format page tables */ 8764 static inline bool regime_using_lpae_format(CPUARMState *env, 8765 ARMMMUIdx mmu_idx) 8766 { 8767 int el = regime_el(env, mmu_idx); 8768 if (el == 2 || arm_el_is_aa64(env, el)) { 8769 return true; 8770 } 8771 if (arm_feature(env, ARM_FEATURE_LPAE) 8772 && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) { 8773 return true; 8774 } 8775 return false; 8776 } 8777 8778 /* Returns true if the stage 1 translation regime is using LPAE format page 8779 * tables. Used when raising alignment exceptions, whose FSR changes depending 8780 * on whether the long or short descriptor format is in use. 
*/ 8781 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx) 8782 { 8783 mmu_idx = stage_1_mmu_idx(mmu_idx); 8784 8785 return regime_using_lpae_format(env, mmu_idx); 8786 } 8787 8788 static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) 8789 { 8790 switch (mmu_idx) { 8791 case ARMMMUIdx_S1SE0: 8792 case ARMMMUIdx_S1NSE0: 8793 case ARMMMUIdx_MUser: 8794 case ARMMMUIdx_MSUser: 8795 case ARMMMUIdx_MUserNegPri: 8796 case ARMMMUIdx_MSUserNegPri: 8797 return true; 8798 default: 8799 return false; 8800 case ARMMMUIdx_S12NSE0: 8801 case ARMMMUIdx_S12NSE1: 8802 g_assert_not_reached(); 8803 } 8804 } 8805 8806 /* Translate section/page access permissions to page 8807 * R/W protection flags 8808 * 8809 * @env: CPUARMState 8810 * @mmu_idx: MMU index indicating required translation regime 8811 * @ap: The 3-bit access permissions (AP[2:0]) 8812 * @domain_prot: The 2-bit domain access permissions 8813 */ 8814 static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, 8815 int ap, int domain_prot) 8816 { 8817 bool is_user = regime_is_user(env, mmu_idx); 8818 8819 if (domain_prot == 3) { 8820 return PAGE_READ | PAGE_WRITE; 8821 } 8822 8823 switch (ap) { 8824 case 0: 8825 if (arm_feature(env, ARM_FEATURE_V7)) { 8826 return 0; 8827 } 8828 switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) { 8829 case SCTLR_S: 8830 return is_user ? 0 : PAGE_READ; 8831 case SCTLR_R: 8832 return PAGE_READ; 8833 default: 8834 return 0; 8835 } 8836 case 1: 8837 return is_user ? 0 : PAGE_READ | PAGE_WRITE; 8838 case 2: 8839 if (is_user) { 8840 return PAGE_READ; 8841 } else { 8842 return PAGE_READ | PAGE_WRITE; 8843 } 8844 case 3: 8845 return PAGE_READ | PAGE_WRITE; 8846 case 4: /* Reserved. */ 8847 return 0; 8848 case 5: 8849 return is_user ? 0 : PAGE_READ; 8850 case 6: 8851 return PAGE_READ; 8852 case 7: 8853 if (!arm_feature(env, ARM_FEATURE_V6K)) { 8854 return 0; 8855 } 8856 return PAGE_READ; 8857 default: 8858 g_assert_not_reached(); 8859 } 8860 } 8861 8862 /* Translate section/page access permissions to page 8863 * R/W protection flags. 8864 * 8865 * @ap: The 2-bit simple AP (AP[2:1]) 8866 * @is_user: TRUE if accessing from PL0 8867 */ 8868 static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user) 8869 { 8870 switch (ap) { 8871 case 0: 8872 return is_user ? 0 : PAGE_READ | PAGE_WRITE; 8873 case 1: 8874 return PAGE_READ | PAGE_WRITE; 8875 case 2: 8876 return is_user ? 
0 : PAGE_READ; 8877 case 3: 8878 return PAGE_READ; 8879 default: 8880 g_assert_not_reached(); 8881 } 8882 } 8883 8884 static inline int 8885 simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap) 8886 { 8887 return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx)); 8888 } 8889 8890 /* Translate S2 section/page access permissions to protection flags 8891 * 8892 * @env: CPUARMState 8893 * @s2ap: The 2-bit stage2 access permissions (S2AP) 8894 * @xn: XN (execute-never) bit 8895 */ 8896 static int get_S2prot(CPUARMState *env, int s2ap, int xn) 8897 { 8898 int prot = 0; 8899 8900 if (s2ap & 1) { 8901 prot |= PAGE_READ; 8902 } 8903 if (s2ap & 2) { 8904 prot |= PAGE_WRITE; 8905 } 8906 if (!xn) { 8907 if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) { 8908 prot |= PAGE_EXEC; 8909 } 8910 } 8911 return prot; 8912 } 8913 8914 /* Translate section/page access permissions to protection flags 8915 * 8916 * @env: CPUARMState 8917 * @mmu_idx: MMU index indicating required translation regime 8918 * @is_aa64: TRUE if AArch64 8919 * @ap: The 2-bit simple AP (AP[2:1]) 8920 * @ns: NS (non-secure) bit 8921 * @xn: XN (execute-never) bit 8922 * @pxn: PXN (privileged execute-never) bit 8923 */ 8924 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, 8925 int ap, int ns, int xn, int pxn) 8926 { 8927 bool is_user = regime_is_user(env, mmu_idx); 8928 int prot_rw, user_rw; 8929 bool have_wxn; 8930 int wxn = 0; 8931 8932 assert(mmu_idx != ARMMMUIdx_S2NS); 8933 8934 user_rw = simple_ap_to_rw_prot_is_user(ap, true); 8935 if (is_user) { 8936 prot_rw = user_rw; 8937 } else { 8938 prot_rw = simple_ap_to_rw_prot_is_user(ap, false); 8939 } 8940 8941 if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) { 8942 return prot_rw; 8943 } 8944 8945 /* TODO have_wxn should be replaced with 8946 * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2) 8947 * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE 8948 * compatible processors have EL2, which is required for [U]WXN. 
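 * Worked example (illustrative): a writable EL1 page with SCTLR.WXN = 1
 * gives wxn != 0 and prot_rw containing PAGE_WRITE, so the final check
 * below withholds PAGE_EXEC and the page comes back as data-only.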
8949 */ 8950 have_wxn = arm_feature(env, ARM_FEATURE_LPAE); 8951 8952 if (have_wxn) { 8953 wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN; 8954 } 8955 8956 if (is_aa64) { 8957 switch (regime_el(env, mmu_idx)) { 8958 case 1: 8959 if (!is_user) { 8960 xn = pxn || (user_rw & PAGE_WRITE); 8961 } 8962 break; 8963 case 2: 8964 case 3: 8965 break; 8966 } 8967 } else if (arm_feature(env, ARM_FEATURE_V7)) { 8968 switch (regime_el(env, mmu_idx)) { 8969 case 1: 8970 case 3: 8971 if (is_user) { 8972 xn = xn || !(user_rw & PAGE_READ); 8973 } else { 8974 int uwxn = 0; 8975 if (have_wxn) { 8976 uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN; 8977 } 8978 xn = xn || !(prot_rw & PAGE_READ) || pxn || 8979 (uwxn && (user_rw & PAGE_WRITE)); 8980 } 8981 break; 8982 case 2: 8983 break; 8984 } 8985 } else { 8986 xn = wxn = 0; 8987 } 8988 8989 if (xn || (wxn && (prot_rw & PAGE_WRITE))) { 8990 return prot_rw; 8991 } 8992 return prot_rw | PAGE_EXEC; 8993 } 8994 8995 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, 8996 uint32_t *table, uint32_t address) 8997 { 8998 /* Note that we can only get here for an AArch32 PL0/PL1 lookup */ 8999 TCR *tcr = regime_tcr(env, mmu_idx); 9000 9001 if (address & tcr->mask) { 9002 if (tcr->raw_tcr & TTBCR_PD1) { 9003 /* Translation table walk disabled for TTBR1 */ 9004 return false; 9005 } 9006 *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000; 9007 } else { 9008 if (tcr->raw_tcr & TTBCR_PD0) { 9009 /* Translation table walk disabled for TTBR0 */ 9010 return false; 9011 } 9012 *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask; 9013 } 9014 *table |= (address >> 18) & 0x3ffc; 9015 return true; 9016 } 9017 9018 /* Translate a S1 pagetable walk through S2 if needed. */ 9019 static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx, 9020 hwaddr addr, MemTxAttrs txattrs, 9021 ARMMMUFaultInfo *fi) 9022 { 9023 if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) && 9024 !regime_translation_disabled(env, ARMMMUIdx_S2NS)) { 9025 target_ulong s2size; 9026 hwaddr s2pa; 9027 int s2prot; 9028 int ret; 9029 9030 ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa, 9031 &txattrs, &s2prot, &s2size, fi, NULL); 9032 if (ret) { 9033 assert(fi->type != ARMFault_None); 9034 fi->s2addr = addr; 9035 fi->stage2 = true; 9036 fi->s1ptw = true; 9037 return ~0; 9038 } 9039 addr = s2pa; 9040 } 9041 return addr; 9042 } 9043 9044 /* All loads done in the course of a page table walk go through here. 
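 * They route the descriptor address through S1_ptw_translate() when a
 * stage 2 regime is active, honour the regime's SCTLR.EE when choosing
 * descriptor endianness, and report external aborts on the walk via
 * fi->type = ARMFault_SyncExternalOnWalk.  A representative call, as used
 * by the short-descriptor walker below:
 *     desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
 *                        mmu_idx, fi);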
*/ 9045 static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure, 9046 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) 9047 { 9048 ARMCPU *cpu = ARM_CPU(cs); 9049 CPUARMState *env = &cpu->env; 9050 MemTxAttrs attrs = {}; 9051 MemTxResult result = MEMTX_OK; 9052 AddressSpace *as; 9053 uint32_t data; 9054 9055 attrs.secure = is_secure; 9056 as = arm_addressspace(cs, attrs); 9057 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); 9058 if (fi->s1ptw) { 9059 return 0; 9060 } 9061 if (regime_translation_big_endian(env, mmu_idx)) { 9062 data = address_space_ldl_be(as, addr, attrs, &result); 9063 } else { 9064 data = address_space_ldl_le(as, addr, attrs, &result); 9065 } 9066 if (result == MEMTX_OK) { 9067 return data; 9068 } 9069 fi->type = ARMFault_SyncExternalOnWalk; 9070 fi->ea = arm_extabort_type(result); 9071 return 0; 9072 } 9073 9074 static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure, 9075 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) 9076 { 9077 ARMCPU *cpu = ARM_CPU(cs); 9078 CPUARMState *env = &cpu->env; 9079 MemTxAttrs attrs = {}; 9080 MemTxResult result = MEMTX_OK; 9081 AddressSpace *as; 9082 uint64_t data; 9083 9084 attrs.secure = is_secure; 9085 as = arm_addressspace(cs, attrs); 9086 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); 9087 if (fi->s1ptw) { 9088 return 0; 9089 } 9090 if (regime_translation_big_endian(env, mmu_idx)) { 9091 data = address_space_ldq_be(as, addr, attrs, &result); 9092 } else { 9093 data = address_space_ldq_le(as, addr, attrs, &result); 9094 } 9095 if (result == MEMTX_OK) { 9096 return data; 9097 } 9098 fi->type = ARMFault_SyncExternalOnWalk; 9099 fi->ea = arm_extabort_type(result); 9100 return 0; 9101 } 9102 9103 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address, 9104 MMUAccessType access_type, ARMMMUIdx mmu_idx, 9105 hwaddr *phys_ptr, int *prot, 9106 target_ulong *page_size, 9107 ARMMMUFaultInfo *fi) 9108 { 9109 CPUState *cs = CPU(arm_env_get_cpu(env)); 9110 int level = 1; 9111 uint32_t table; 9112 uint32_t desc; 9113 int type; 9114 int ap; 9115 int domain = 0; 9116 int domain_prot; 9117 hwaddr phys_addr; 9118 uint32_t dacr; 9119 9120 /* Pagetable walk. */ 9121 /* Lookup l1 descriptor. */ 9122 if (!get_level1_table_address(env, mmu_idx, &table, address)) { 9123 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 9124 fi->type = ARMFault_Translation; 9125 goto do_fault; 9126 } 9127 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 9128 mmu_idx, fi); 9129 if (fi->type != ARMFault_None) { 9130 goto do_fault; 9131 } 9132 type = (desc & 3); 9133 domain = (desc >> 5) & 0x0f; 9134 if (regime_el(env, mmu_idx) == 1) { 9135 dacr = env->cp15.dacr_ns; 9136 } else { 9137 dacr = env->cp15.dacr_s; 9138 } 9139 domain_prot = (dacr >> (domain * 2)) & 3; 9140 if (type == 0) { 9141 /* Section translation fault. */ 9142 fi->type = ARMFault_Translation; 9143 goto do_fault; 9144 } 9145 if (type != 2) { 9146 level = 2; 9147 } 9148 if (domain_prot == 0 || domain_prot == 2) { 9149 fi->type = ARMFault_Domain; 9150 goto do_fault; 9151 } 9152 if (type == 2) { 9153 /* 1Mb section. */ 9154 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 9155 ap = (desc >> 10) & 3; 9156 *page_size = 1024 * 1024; 9157 } else { 9158 /* Lookup l2 entry. */ 9159 if (type == 1) { 9160 /* Coarse pagetable. */ 9161 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 9162 } else { 9163 /* Fine pagetable. 
*/ 9164 table = (desc & 0xfffff000) | ((address >> 8) & 0xffc); 9165 } 9166 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 9167 mmu_idx, fi); 9168 if (fi->type != ARMFault_None) { 9169 goto do_fault; 9170 } 9171 switch (desc & 3) { 9172 case 0: /* Page translation fault. */ 9173 fi->type = ARMFault_Translation; 9174 goto do_fault; 9175 case 1: /* 64k page. */ 9176 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 9177 ap = (desc >> (4 + ((address >> 13) & 6))) & 3; 9178 *page_size = 0x10000; 9179 break; 9180 case 2: /* 4k page. */ 9181 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 9182 ap = (desc >> (4 + ((address >> 9) & 6))) & 3; 9183 *page_size = 0x1000; 9184 break; 9185 case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */ 9186 if (type == 1) { 9187 /* ARMv6/XScale extended small page format */ 9188 if (arm_feature(env, ARM_FEATURE_XSCALE) 9189 || arm_feature(env, ARM_FEATURE_V6)) { 9190 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 9191 *page_size = 0x1000; 9192 } else { 9193 /* UNPREDICTABLE in ARMv5; we choose to take a 9194 * page translation fault. 9195 */ 9196 fi->type = ARMFault_Translation; 9197 goto do_fault; 9198 } 9199 } else { 9200 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff); 9201 *page_size = 0x400; 9202 } 9203 ap = (desc >> 4) & 3; 9204 break; 9205 default: 9206 /* Never happens, but compiler isn't smart enough to tell. */ 9207 abort(); 9208 } 9209 } 9210 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 9211 *prot |= *prot ? PAGE_EXEC : 0; 9212 if (!(*prot & (1 << access_type))) { 9213 /* Access permission fault. */ 9214 fi->type = ARMFault_Permission; 9215 goto do_fault; 9216 } 9217 *phys_ptr = phys_addr; 9218 return false; 9219 do_fault: 9220 fi->domain = domain; 9221 fi->level = level; 9222 return true; 9223 } 9224 9225 static bool get_phys_addr_v6(CPUARMState *env, uint32_t address, 9226 MMUAccessType access_type, ARMMMUIdx mmu_idx, 9227 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 9228 target_ulong *page_size, ARMMMUFaultInfo *fi) 9229 { 9230 CPUState *cs = CPU(arm_env_get_cpu(env)); 9231 int level = 1; 9232 uint32_t table; 9233 uint32_t desc; 9234 uint32_t xn; 9235 uint32_t pxn = 0; 9236 int type; 9237 int ap; 9238 int domain = 0; 9239 int domain_prot; 9240 hwaddr phys_addr; 9241 uint32_t dacr; 9242 bool ns; 9243 9244 /* Pagetable walk. */ 9245 /* Lookup l1 descriptor. */ 9246 if (!get_level1_table_address(env, mmu_idx, &table, address)) { 9247 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 9248 fi->type = ARMFault_Translation; 9249 goto do_fault; 9250 } 9251 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 9252 mmu_idx, fi); 9253 if (fi->type != ARMFault_None) { 9254 goto do_fault; 9255 } 9256 type = (desc & 3); 9257 if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) { 9258 /* Section translation fault, or attempt to use the encoding 9259 * which is Reserved on implementations without PXN. 9260 */ 9261 fi->type = ARMFault_Translation; 9262 goto do_fault; 9263 } 9264 if ((type == 1) || !(desc & (1 << 18))) { 9265 /* Page or Section. 
*/ 9266 domain = (desc >> 5) & 0x0f; 9267 } 9268 if (regime_el(env, mmu_idx) == 1) { 9269 dacr = env->cp15.dacr_ns; 9270 } else { 9271 dacr = env->cp15.dacr_s; 9272 } 9273 if (type == 1) { 9274 level = 2; 9275 } 9276 domain_prot = (dacr >> (domain * 2)) & 3; 9277 if (domain_prot == 0 || domain_prot == 2) { 9278 /* Section or Page domain fault */ 9279 fi->type = ARMFault_Domain; 9280 goto do_fault; 9281 } 9282 if (type != 1) { 9283 if (desc & (1 << 18)) { 9284 /* Supersection. */ 9285 phys_addr = (desc & 0xff000000) | (address & 0x00ffffff); 9286 phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32; 9287 phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36; 9288 *page_size = 0x1000000; 9289 } else { 9290 /* Section. */ 9291 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 9292 *page_size = 0x100000; 9293 } 9294 ap = ((desc >> 10) & 3) | ((desc >> 13) & 4); 9295 xn = desc & (1 << 4); 9296 pxn = desc & 1; 9297 ns = extract32(desc, 19, 1); 9298 } else { 9299 if (arm_feature(env, ARM_FEATURE_PXN)) { 9300 pxn = (desc >> 2) & 1; 9301 } 9302 ns = extract32(desc, 3, 1); 9303 /* Lookup l2 entry. */ 9304 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 9305 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 9306 mmu_idx, fi); 9307 if (fi->type != ARMFault_None) { 9308 goto do_fault; 9309 } 9310 ap = ((desc >> 4) & 3) | ((desc >> 7) & 4); 9311 switch (desc & 3) { 9312 case 0: /* Page translation fault. */ 9313 fi->type = ARMFault_Translation; 9314 goto do_fault; 9315 case 1: /* 64k page. */ 9316 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 9317 xn = desc & (1 << 15); 9318 *page_size = 0x10000; 9319 break; 9320 case 2: case 3: /* 4k page. */ 9321 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 9322 xn = desc & 1; 9323 *page_size = 0x1000; 9324 break; 9325 default: 9326 /* Never happens, but compiler isn't smart enough to tell. */ 9327 abort(); 9328 } 9329 } 9330 if (domain_prot == 3) { 9331 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 9332 } else { 9333 if (pxn && !regime_is_user(env, mmu_idx)) { 9334 xn = 1; 9335 } 9336 if (xn && access_type == MMU_INST_FETCH) { 9337 fi->type = ARMFault_Permission; 9338 goto do_fault; 9339 } 9340 9341 if (arm_feature(env, ARM_FEATURE_V6K) && 9342 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) { 9343 /* The simplified model uses AP[0] as an access control bit. */ 9344 if ((ap & 1) == 0) { 9345 /* Access flag fault. */ 9346 fi->type = ARMFault_AccessFlag; 9347 goto do_fault; 9348 } 9349 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1); 9350 } else { 9351 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 9352 } 9353 if (*prot && !xn) { 9354 *prot |= PAGE_EXEC; 9355 } 9356 if (!(*prot & (1 << access_type))) { 9357 /* Access permission fault. */ 9358 fi->type = ARMFault_Permission; 9359 goto do_fault; 9360 } 9361 } 9362 if (ns) { 9363 /* The NS bit will (as required by the architecture) have no effect if 9364 * the CPU doesn't support TZ or this is a non-secure translation 9365 * regime, because the attribute will already be non-secure. 
9366 */ 9367 attrs->secure = false; 9368 } 9369 *phys_ptr = phys_addr; 9370 return false; 9371 do_fault: 9372 fi->domain = domain; 9373 fi->level = level; 9374 return true; 9375 } 9376 9377 /* 9378 * check_s2_mmu_setup 9379 * @cpu: ARMCPU 9380 * @is_aa64: True if the translation regime is in AArch64 state 9381 * @startlevel: Suggested starting level 9382 * @inputsize: Bitsize of IPAs 9383 * @stride: Page-table stride (See the ARM ARM) 9384 * 9385 * Returns true if the suggested S2 translation parameters are OK and 9386 * false otherwise. 9387 */ 9388 static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level, 9389 int inputsize, int stride) 9390 { 9391 const int grainsize = stride + 3; 9392 int startsizecheck; 9393 9394 /* Negative levels are never allowed. */ 9395 if (level < 0) { 9396 return false; 9397 } 9398 9399 startsizecheck = inputsize - ((3 - level) * stride + grainsize); 9400 if (startsizecheck < 1 || startsizecheck > stride + 4) { 9401 return false; 9402 } 9403 9404 if (is_aa64) { 9405 CPUARMState *env = &cpu->env; 9406 unsigned int pamax = arm_pamax(cpu); 9407 9408 switch (stride) { 9409 case 13: /* 64KB Pages. */ 9410 if (level == 0 || (level == 1 && pamax <= 42)) { 9411 return false; 9412 } 9413 break; 9414 case 11: /* 16KB Pages. */ 9415 if (level == 0 || (level == 1 && pamax <= 40)) { 9416 return false; 9417 } 9418 break; 9419 case 9: /* 4KB Pages. */ 9420 if (level == 0 && pamax <= 42) { 9421 return false; 9422 } 9423 break; 9424 default: 9425 g_assert_not_reached(); 9426 } 9427 9428 /* Inputsize checks. */ 9429 if (inputsize > pamax && 9430 (arm_el_is_aa64(env, 1) || inputsize > 40)) { 9431 /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */ 9432 return false; 9433 } 9434 } else { 9435 /* AArch32 only supports 4KB pages. Assert on that. */ 9436 assert(stride == 9); 9437 9438 if (level == 0) { 9439 return false; 9440 } 9441 } 9442 return true; 9443 } 9444 9445 /* Translate from the 4-bit stage 2 representation of 9446 * memory attributes (without cache-allocation hints) to 9447 * the 8-bit representation of the stage 1 MAIR registers 9448 * (which includes allocation hints). 9449 * 9450 * ref: shared/translation/attrs/S2AttrDecode() 9451 * .../S2ConvertAttrsHints() 9452 */ 9453 static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs) 9454 { 9455 uint8_t hiattr = extract32(s2attrs, 2, 2); 9456 uint8_t loattr = extract32(s2attrs, 0, 2); 9457 uint8_t hihint = 0, lohint = 0; 9458 9459 if (hiattr != 0) { /* normal memory */ 9460 if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */ 9461 hiattr = loattr = 1; /* non-cacheable */ 9462 } else { 9463 if (hiattr != 1) { /* Write-through or write-back */ 9464 hihint = 3; /* RW allocate */ 9465 } 9466 if (loattr != 1) { /* Write-through or write-back */ 9467 lohint = 3; /* RW allocate */ 9468 } 9469 } 9470 } 9471 9472 return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint; 9473 } 9474 9475 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, 9476 MMUAccessType access_type, ARMMMUIdx mmu_idx, 9477 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, 9478 target_ulong *page_size_ptr, 9479 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) 9480 { 9481 ARMCPU *cpu = arm_env_get_cpu(env); 9482 CPUState *cs = CPU(cpu); 9483 /* Read an LPAE long-descriptor translation table. 
*/ 9484 ARMFaultType fault_type = ARMFault_Translation; 9485 uint32_t level; 9486 uint32_t epd = 0; 9487 int32_t t0sz, t1sz; 9488 uint32_t tg; 9489 uint64_t ttbr; 9490 int ttbr_select; 9491 hwaddr descaddr, indexmask, indexmask_grainsize; 9492 uint32_t tableattrs; 9493 target_ulong page_size; 9494 uint32_t attrs; 9495 int32_t stride = 9; 9496 int32_t addrsize; 9497 int inputsize; 9498 int32_t tbi = 0; 9499 TCR *tcr = regime_tcr(env, mmu_idx); 9500 int ap, ns, xn, pxn; 9501 uint32_t el = regime_el(env, mmu_idx); 9502 bool ttbr1_valid = true; 9503 uint64_t descaddrmask; 9504 bool aarch64 = arm_el_is_aa64(env, el); 9505 9506 /* TODO: 9507 * This code does not handle the different format TCR for VTCR_EL2. 9508 * This code also does not support shareability levels. 9509 * Attribute and permission bit handling should also be checked when adding 9510 * support for those page table walks. 9511 */ 9512 if (aarch64) { 9513 level = 0; 9514 addrsize = 64; 9515 if (el > 1) { 9516 if (mmu_idx != ARMMMUIdx_S2NS) { 9517 tbi = extract64(tcr->raw_tcr, 20, 1); 9518 } 9519 } else { 9520 if (extract64(address, 55, 1)) { 9521 tbi = extract64(tcr->raw_tcr, 38, 1); 9522 } else { 9523 tbi = extract64(tcr->raw_tcr, 37, 1); 9524 } 9525 } 9526 tbi *= 8; 9527 9528 /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it 9529 * invalid. 9530 */ 9531 if (el > 1) { 9532 ttbr1_valid = false; 9533 } 9534 } else { 9535 level = 1; 9536 addrsize = 32; 9537 /* There is no TTBR1 for EL2 */ 9538 if (el == 2) { 9539 ttbr1_valid = false; 9540 } 9541 } 9542 9543 /* Determine whether this address is in the region controlled by 9544 * TTBR0 or TTBR1 (or if it is in neither region and should fault). 9545 * This is a Non-secure PL0/1 stage 1 translation, so controlled by 9546 * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32: 9547 */ 9548 if (aarch64) { 9549 /* AArch64 translation. */ 9550 t0sz = extract32(tcr->raw_tcr, 0, 6); 9551 t0sz = MIN(t0sz, 39); 9552 t0sz = MAX(t0sz, 16); 9553 } else if (mmu_idx != ARMMMUIdx_S2NS) { 9554 /* AArch32 stage 1 translation. */ 9555 t0sz = extract32(tcr->raw_tcr, 0, 3); 9556 } else { 9557 /* AArch32 stage 2 translation. */ 9558 bool sext = extract32(tcr->raw_tcr, 4, 1); 9559 bool sign = extract32(tcr->raw_tcr, 3, 1); 9560 /* Address size is 40-bit for a stage 2 translation, 9561 * and t0sz can be negative (from -8 to 7), 9562 * so we need to adjust it to use the TTBR selecting logic below. 9563 */ 9564 addrsize = 40; 9565 t0sz = sextract32(tcr->raw_tcr, 0, 4) + 8; 9566 9567 /* If the sign-extend bit is not the same as t0sz[3], the result 9568 * is unpredictable. Flag this as a guest error. 
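 * (Worked example, illustrative: T0SZ is treated as a signed 4-bit field
 * here, so a raw value of 0b1000 (-8) yields t0sz = 0, i.e. the full
 * 40-bit input range, while raw 0b0111 (+7) yields t0sz = 15, a 25-bit
 * input range.)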
*/ 9569 if (sign != sext) { 9570 qemu_log_mask(LOG_GUEST_ERROR, 9571 "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n"); 9572 } 9573 } 9574 t1sz = extract32(tcr->raw_tcr, 16, 6); 9575 if (aarch64) { 9576 t1sz = MIN(t1sz, 39); 9577 t1sz = MAX(t1sz, 16); 9578 } 9579 if (t0sz && !extract64(address, addrsize - t0sz, t0sz - tbi)) { 9580 /* there is a ttbr0 region and we are in it (high bits all zero) */ 9581 ttbr_select = 0; 9582 } else if (ttbr1_valid && t1sz && 9583 !extract64(~address, addrsize - t1sz, t1sz - tbi)) { 9584 /* there is a ttbr1 region and we are in it (high bits all one) */ 9585 ttbr_select = 1; 9586 } else if (!t0sz) { 9587 /* ttbr0 region is "everything not in the ttbr1 region" */ 9588 ttbr_select = 0; 9589 } else if (!t1sz && ttbr1_valid) { 9590 /* ttbr1 region is "everything not in the ttbr0 region" */ 9591 ttbr_select = 1; 9592 } else { 9593 /* in the gap between the two regions, this is a Translation fault */ 9594 fault_type = ARMFault_Translation; 9595 goto do_fault; 9596 } 9597 9598 /* Note that QEMU ignores shareability and cacheability attributes, 9599 * so we don't need to do anything with the SH, ORGN, IRGN fields 9600 * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the 9601 * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently 9602 * implement any ASID-like capability so we can ignore it (instead 9603 * we will always flush the TLB any time the ASID is changed). 9604 */ 9605 if (ttbr_select == 0) { 9606 ttbr = regime_ttbr(env, mmu_idx, 0); 9607 if (el < 2) { 9608 epd = extract32(tcr->raw_tcr, 7, 1); 9609 } 9610 inputsize = addrsize - t0sz; 9611 9612 tg = extract32(tcr->raw_tcr, 14, 2); 9613 if (tg == 1) { /* 64KB pages */ 9614 stride = 13; 9615 } 9616 if (tg == 2) { /* 16KB pages */ 9617 stride = 11; 9618 } 9619 } else { 9620 /* We should only be here if TTBR1 is valid */ 9621 assert(ttbr1_valid); 9622 9623 ttbr = regime_ttbr(env, mmu_idx, 1); 9624 epd = extract32(tcr->raw_tcr, 23, 1); 9625 inputsize = addrsize - t1sz; 9626 9627 tg = extract32(tcr->raw_tcr, 30, 2); 9628 if (tg == 3) { /* 64KB pages */ 9629 stride = 13; 9630 } 9631 if (tg == 1) { /* 16KB pages */ 9632 stride = 11; 9633 } 9634 } 9635 9636 /* Here we should have set up all the parameters for the translation: 9637 * inputsize, ttbr, epd, stride, tbi 9638 */ 9639 9640 if (epd) { 9641 /* Translation table walk disabled => Translation fault on TLB miss 9642 * Note: This is always 0 on 64-bit EL2 and EL3. 9643 */ 9644 goto do_fault; 9645 } 9646 9647 if (mmu_idx != ARMMMUIdx_S2NS) { 9648 /* The starting level depends on the virtual address size (which can 9649 * be up to 48 bits) and the translation granule size. It indicates 9650 * the number of strides (stride bits at a time) needed to 9651 * consume the bits of the input address. In the pseudocode this is: 9652 * level = 4 - RoundUp((inputsize - grainsize) / stride) 9653 * where their 'inputsize' is our 'inputsize', 'grainsize' is 9654 * our 'stride + 3' and 'stride' is our 'stride'. 
9655 * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying: 9656 * = 4 - (inputsize - stride - 3 + stride - 1) / stride 9657 * = 4 - (inputsize - 4) / stride; 9658 */ 9659 level = 4 - (inputsize - 4) / stride; 9660 } else { 9661 /* For stage 2 translations the starting level is specified by the 9662 * VTCR_EL2.SL0 field (whose interpretation depends on the page size) 9663 */ 9664 uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2); 9665 uint32_t startlevel; 9666 bool ok; 9667 9668 if (!aarch64 || stride == 9) { 9669 /* AArch32 or 4KB pages */ 9670 startlevel = 2 - sl0; 9671 } else { 9672 /* 16KB or 64KB pages */ 9673 startlevel = 3 - sl0; 9674 } 9675 9676 /* Check that the starting level is valid. */ 9677 ok = check_s2_mmu_setup(cpu, aarch64, startlevel, 9678 inputsize, stride); 9679 if (!ok) { 9680 fault_type = ARMFault_Translation; 9681 goto do_fault; 9682 } 9683 level = startlevel; 9684 } 9685 9686 indexmask_grainsize = (1ULL << (stride + 3)) - 1; 9687 indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1; 9688 9689 /* Now we can extract the actual base address from the TTBR */ 9690 descaddr = extract64(ttbr, 0, 48); 9691 descaddr &= ~indexmask; 9692 9693 /* The address field in the descriptor goes up to bit 39 for ARMv7 9694 * but up to bit 47 for ARMv8, but we use the descaddrmask 9695 * up to bit 39 for AArch32, because we don't need other bits in that case 9696 * to construct next descriptor address (anyway they should be all zeroes). 9697 */ 9698 descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) & 9699 ~indexmask_grainsize; 9700 9701 /* Secure accesses start with the page table in secure memory and 9702 * can be downgraded to non-secure at any step. Non-secure accesses 9703 * remain non-secure. We implement this by just ORing in the NSTable/NS 9704 * bits at each step. 9705 */ 9706 tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4); 9707 for (;;) { 9708 uint64_t descriptor; 9709 bool nstable; 9710 9711 descaddr |= (address >> (stride * (4 - level))) & indexmask; 9712 descaddr &= ~7ULL; 9713 nstable = extract32(tableattrs, 4, 1); 9714 descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi); 9715 if (fi->type != ARMFault_None) { 9716 goto do_fault; 9717 } 9718 9719 if (!(descriptor & 1) || 9720 (!(descriptor & 2) && (level == 3))) { 9721 /* Invalid, or the Reserved level 3 encoding */ 9722 goto do_fault; 9723 } 9724 descaddr = descriptor & descaddrmask; 9725 9726 if ((descriptor & 2) && (level < 3)) { 9727 /* Table entry. The top five bits are attributes which may 9728 * propagate down through lower levels of the table (and 9729 * which are all arranged so that 0 means "no effect", so 9730 * we can gather them up by ORing in the bits at each level). 9731 */ 9732 tableattrs |= extract64(descriptor, 59, 5); 9733 level++; 9734 indexmask = indexmask_grainsize; 9735 continue; 9736 } 9737 /* Block entry at level 1 or 2, or page entry at level 3. 9738 * These are basically the same thing, although the number 9739 * of bits we pull in from the vaddr varies. 
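 * Worked example of the size computed below (illustrative): with 4KB
 * granules (stride = 9) a level 3 page is 1 << 12 = 4KB, a level 2 block
 * is 1 << 21 = 2MB and a level 1 block is 1 << 30 = 1GB; with 64KB
 * granules (stride = 13) a level 3 page is 64KB and a level 2 block is
 * 512MB.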
9740 */ 9741 page_size = (1ULL << ((stride * (4 - level)) + 3)); 9742 descaddr |= (address & (page_size - 1)); 9743 /* Extract attributes from the descriptor */ 9744 attrs = extract64(descriptor, 2, 10) 9745 | (extract64(descriptor, 52, 12) << 10); 9746 9747 if (mmu_idx == ARMMMUIdx_S2NS) { 9748 /* Stage 2 table descriptors do not include any attribute fields */ 9749 break; 9750 } 9751 /* Merge in attributes from table descriptors */ 9752 attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */ 9753 attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */ 9754 /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1 9755 * means "force PL1 access only", which means forcing AP[1] to 0. 9756 */ 9757 if (extract32(tableattrs, 2, 1)) { 9758 attrs &= ~(1 << 4); 9759 } 9760 attrs |= nstable << 3; /* NS */ 9761 break; 9762 } 9763 /* Here descaddr is the final physical address, and attributes 9764 * are all in attrs. 9765 */ 9766 fault_type = ARMFault_AccessFlag; 9767 if ((attrs & (1 << 8)) == 0) { 9768 /* Access flag */ 9769 goto do_fault; 9770 } 9771 9772 ap = extract32(attrs, 4, 2); 9773 xn = extract32(attrs, 12, 1); 9774 9775 if (mmu_idx == ARMMMUIdx_S2NS) { 9776 ns = true; 9777 *prot = get_S2prot(env, ap, xn); 9778 } else { 9779 ns = extract32(attrs, 3, 1); 9780 pxn = extract32(attrs, 11, 1); 9781 *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn); 9782 } 9783 9784 fault_type = ARMFault_Permission; 9785 if (!(*prot & (1 << access_type))) { 9786 goto do_fault; 9787 } 9788 9789 if (ns) { 9790 /* The NS bit will (as required by the architecture) have no effect if 9791 * the CPU doesn't support TZ or this is a non-secure translation 9792 * regime, because the attribute will already be non-secure. 9793 */ 9794 txattrs->secure = false; 9795 } 9796 9797 if (cacheattrs != NULL) { 9798 if (mmu_idx == ARMMMUIdx_S2NS) { 9799 cacheattrs->attrs = convert_stage2_attrs(env, 9800 extract32(attrs, 0, 4)); 9801 } else { 9802 /* Index into MAIR registers for cache attributes */ 9803 uint8_t attrindx = extract32(attrs, 0, 3); 9804 uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; 9805 assert(attrindx <= 7); 9806 cacheattrs->attrs = extract64(mair, attrindx * 8, 8); 9807 } 9808 cacheattrs->shareability = extract32(attrs, 6, 2); 9809 } 9810 9811 *phys_ptr = descaddr; 9812 *page_size_ptr = page_size; 9813 return false; 9814 9815 do_fault: 9816 fi->type = fault_type; 9817 fi->level = level; 9818 /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */ 9819 fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS); 9820 return true; 9821 } 9822 9823 static inline void get_phys_addr_pmsav7_default(CPUARMState *env, 9824 ARMMMUIdx mmu_idx, 9825 int32_t address, int *prot) 9826 { 9827 if (!arm_feature(env, ARM_FEATURE_M)) { 9828 *prot = PAGE_READ | PAGE_WRITE; 9829 switch (address) { 9830 case 0xF0000000 ... 0xFFFFFFFF: 9831 if (regime_sctlr(env, mmu_idx) & SCTLR_V) { 9832 /* hivecs execing is ok */ 9833 *prot |= PAGE_EXEC; 9834 } 9835 break; 9836 case 0x00000000 ... 0x7FFFFFFF: 9837 *prot |= PAGE_EXEC; 9838 break; 9839 } 9840 } else { 9841 /* Default system address map for M profile cores. 9842 * The architecture specifies which regions are execute-never; 9843 * at the MPU level no other checks are defined. 9844 */ 9845 switch (address) { 9846 case 0x00000000 ... 0x1fffffff: /* ROM */ 9847 case 0x20000000 ... 0x3fffffff: /* SRAM */ 9848 case 0x60000000 ... 0x7fffffff: /* RAM */ 9849 case 0x80000000 ... 
0x9fffffff: /* RAM */ 9850 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 9851 break; 9852 case 0x40000000 ... 0x5fffffff: /* Peripheral */ 9853 case 0xa0000000 ... 0xbfffffff: /* Device */ 9854 case 0xc0000000 ... 0xdfffffff: /* Device */ 9855 case 0xe0000000 ... 0xffffffff: /* System */ 9856 *prot = PAGE_READ | PAGE_WRITE; 9857 break; 9858 default: 9859 g_assert_not_reached(); 9860 } 9861 } 9862 } 9863 9864 static bool pmsav7_use_background_region(ARMCPU *cpu, 9865 ARMMMUIdx mmu_idx, bool is_user) 9866 { 9867 /* Return true if we should use the default memory map as a 9868 * "background" region if there are no hits against any MPU regions. 9869 */ 9870 CPUARMState *env = &cpu->env; 9871 9872 if (is_user) { 9873 return false; 9874 } 9875 9876 if (arm_feature(env, ARM_FEATURE_M)) { 9877 return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] 9878 & R_V7M_MPU_CTRL_PRIVDEFENA_MASK; 9879 } else { 9880 return regime_sctlr(env, mmu_idx) & SCTLR_BR; 9881 } 9882 } 9883 9884 static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address) 9885 { 9886 /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */ 9887 return arm_feature(env, ARM_FEATURE_M) && 9888 extract32(address, 20, 12) == 0xe00; 9889 } 9890 9891 static inline bool m_is_system_region(CPUARMState *env, uint32_t address) 9892 { 9893 /* True if address is in the M profile system region 9894 * 0xe0000000 - 0xffffffff 9895 */ 9896 return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7; 9897 } 9898 9899 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, 9900 MMUAccessType access_type, ARMMMUIdx mmu_idx, 9901 hwaddr *phys_ptr, int *prot, 9902 target_ulong *page_size, 9903 ARMMMUFaultInfo *fi) 9904 { 9905 ARMCPU *cpu = arm_env_get_cpu(env); 9906 int n; 9907 bool is_user = regime_is_user(env, mmu_idx); 9908 9909 *phys_ptr = address; 9910 *page_size = TARGET_PAGE_SIZE; 9911 *prot = 0; 9912 9913 if (regime_translation_disabled(env, mmu_idx) || 9914 m_is_ppb_region(env, address)) { 9915 /* MPU disabled or M profile PPB access: use default memory map. 9916 * The other case which uses the default memory map in the 9917 * v7M ARM ARM pseudocode is exception vector reads from the vector 9918 * table. In QEMU those accesses are done in arm_v7m_load_vector(), 9919 * which always does a direct read using address_space_ldl(), rather 9920 * than going via this function, so we don't need to check that here. 9921 */ 9922 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 9923 } else { /* MPU enabled */ 9924 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { 9925 /* region search */ 9926 uint32_t base = env->pmsav7.drbar[n]; 9927 uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5); 9928 uint32_t rmask; 9929 bool srdis = false; 9930 9931 if (!(env->pmsav7.drsr[n] & 0x1)) { 9932 continue; 9933 } 9934 9935 if (!rsize) { 9936 qemu_log_mask(LOG_GUEST_ERROR, 9937 "DRSR[%d]: Rsize field cannot be 0\n", n); 9938 continue; 9939 } 9940 rsize++; 9941 rmask = (1ull << rsize) - 1; 9942 9943 if (base & rmask) { 9944 qemu_log_mask(LOG_GUEST_ERROR, 9945 "DRBAR[%d]: 0x%" PRIx32 " misaligned " 9946 "to DRSR region size, mask = 0x%" PRIx32 "\n", 9947 n, base, rmask); 9948 continue; 9949 } 9950 9951 if (address < base || address > base + rmask) { 9952 /* 9953 * Address not in this region. We must check whether the 9954 * region covers addresses in the same page as our address. 
9955 * In that case we must not report a size that covers the 9956 * whole page for a subsequent hit against a different MPU 9957 * region or the background region, because it would result in 9958 * incorrect TLB hits for subsequent accesses to addresses that 9959 * are in this MPU region. 9960 */ 9961 if (ranges_overlap(base, rmask, 9962 address & TARGET_PAGE_MASK, 9963 TARGET_PAGE_SIZE)) { 9964 *page_size = 1; 9965 } 9966 continue; 9967 } 9968 9969 /* Region matched */ 9970 9971 if (rsize >= 8) { /* no subregions for regions < 256 bytes */ 9972 int i, snd; 9973 uint32_t srdis_mask; 9974 9975 rsize -= 3; /* sub region size (power of 2) */ 9976 snd = ((address - base) >> rsize) & 0x7; 9977 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1); 9978 9979 srdis_mask = srdis ? 0x3 : 0x0; 9980 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) { 9981 /* This will check in groups of 2, 4 and then 8, whether 9982 * the subregion bits are consistent. rsize is incremented 9983 * back up to give the region size, considering consistent 9984 * adjacent subregions as one region. Stop testing if rsize 9985 * is already big enough for an entire QEMU page. 9986 */ 9987 int snd_rounded = snd & ~(i - 1); 9988 uint32_t srdis_multi = extract32(env->pmsav7.drsr[n], 9989 snd_rounded + 8, i); 9990 if (srdis_mask ^ srdis_multi) { 9991 break; 9992 } 9993 srdis_mask = (srdis_mask << i) | srdis_mask; 9994 rsize++; 9995 } 9996 } 9997 if (srdis) { 9998 continue; 9999 } 10000 if (rsize < TARGET_PAGE_BITS) { 10001 *page_size = 1 << rsize; 10002 } 10003 break; 10004 } 10005 10006 if (n == -1) { /* no hits */ 10007 if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) { 10008 /* background fault */ 10009 fi->type = ARMFault_Background; 10010 return true; 10011 } 10012 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 10013 } else { /* a MPU hit! */ 10014 uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3); 10015 uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1); 10016 10017 if (m_is_system_region(env, address)) { 10018 /* System space is always execute never */ 10019 xn = 1; 10020 } 10021 10022 if (is_user) { /* User mode AP bit decoding */ 10023 switch (ap) { 10024 case 0: 10025 case 1: 10026 case 5: 10027 break; /* no access */ 10028 case 3: 10029 *prot |= PAGE_WRITE; 10030 /* fall through */ 10031 case 2: 10032 case 6: 10033 *prot |= PAGE_READ | PAGE_EXEC; 10034 break; 10035 case 7: 10036 /* for v7M, same as 6; for R profile a reserved value */ 10037 if (arm_feature(env, ARM_FEATURE_M)) { 10038 *prot |= PAGE_READ | PAGE_EXEC; 10039 break; 10040 } 10041 /* fall through */ 10042 default: 10043 qemu_log_mask(LOG_GUEST_ERROR, 10044 "DRACR[%d]: Bad value for AP bits: 0x%" 10045 PRIx32 "\n", n, ap); 10046 } 10047 } else { /* Priv. 
mode AP bits decoding */ 10048 switch (ap) { 10049 case 0: 10050 break; /* no access */ 10051 case 1: 10052 case 2: 10053 case 3: 10054 *prot |= PAGE_WRITE; 10055 /* fall through */ 10056 case 5: 10057 case 6: 10058 *prot |= PAGE_READ | PAGE_EXEC; 10059 break; 10060 case 7: 10061 /* for v7M, same as 6; for R profile a reserved value */ 10062 if (arm_feature(env, ARM_FEATURE_M)) { 10063 *prot |= PAGE_READ | PAGE_EXEC; 10064 break; 10065 } 10066 /* fall through */ 10067 default: 10068 qemu_log_mask(LOG_GUEST_ERROR, 10069 "DRACR[%d]: Bad value for AP bits: 0x%" 10070 PRIx32 "\n", n, ap); 10071 } 10072 } 10073 10074 /* execute never */ 10075 if (xn) { 10076 *prot &= ~PAGE_EXEC; 10077 } 10078 } 10079 } 10080 10081 fi->type = ARMFault_Permission; 10082 fi->level = 1; 10083 return !(*prot & (1 << access_type)); 10084 } 10085 10086 static bool v8m_is_sau_exempt(CPUARMState *env, 10087 uint32_t address, MMUAccessType access_type) 10088 { 10089 /* The architecture specifies that certain address ranges are 10090 * exempt from v8M SAU/IDAU checks. 10091 */ 10092 return 10093 (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) || 10094 (address >= 0xe0000000 && address <= 0xe0002fff) || 10095 (address >= 0xe000e000 && address <= 0xe000efff) || 10096 (address >= 0xe002e000 && address <= 0xe002efff) || 10097 (address >= 0xe0040000 && address <= 0xe0041fff) || 10098 (address >= 0xe00ff000 && address <= 0xe00fffff); 10099 } 10100 10101 static void v8m_security_lookup(CPUARMState *env, uint32_t address, 10102 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10103 V8M_SAttributes *sattrs) 10104 { 10105 /* Look up the security attributes for this address. Compare the 10106 * pseudocode SecurityCheck() function. 10107 * We assume the caller has zero-initialized *sattrs. 10108 */ 10109 ARMCPU *cpu = arm_env_get_cpu(env); 10110 int r; 10111 bool idau_exempt = false, idau_ns = true, idau_nsc = true; 10112 int idau_region = IREGION_NOTVALID; 10113 uint32_t addr_page_base = address & TARGET_PAGE_MASK; 10114 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 10115 10116 if (cpu->idau) { 10117 IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau); 10118 IDAUInterface *ii = IDAU_INTERFACE(cpu->idau); 10119 10120 iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns, 10121 &idau_nsc); 10122 } 10123 10124 if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) { 10125 /* 0xf0000000..0xffffffff is always S for insn fetches */ 10126 return; 10127 } 10128 10129 if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) { 10130 sattrs->ns = !regime_is_secure(env, mmu_idx); 10131 return; 10132 } 10133 10134 if (idau_region != IREGION_NOTVALID) { 10135 sattrs->irvalid = true; 10136 sattrs->iregion = idau_region; 10137 } 10138 10139 switch (env->sau.ctrl & 3) { 10140 case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */ 10141 break; 10142 case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */ 10143 sattrs->ns = true; 10144 break; 10145 default: /* SAU.ENABLE == 1 */ 10146 for (r = 0; r < cpu->sau_sregion; r++) { 10147 if (env->sau.rlar[r] & 1) { 10148 uint32_t base = env->sau.rbar[r] & ~0x1f; 10149 uint32_t limit = env->sau.rlar[r] | 0x1f; 10150 10151 if (base <= address && limit >= address) { 10152 if (base > addr_page_base || limit < addr_page_limit) { 10153 sattrs->subpage = true; 10154 } 10155 if (sattrs->srvalid) { 10156 /* If we hit in more than one region then we must report 10157 * as Secure, not NS-Callable, with no valid region 10158 * number info. 
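* (Overlapping SAU regions therefore always resolve towards the more
* restrictive, Secure, attribute.)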
10159 */ 10160 sattrs->ns = false; 10161 sattrs->nsc = false; 10162 sattrs->sregion = 0; 10163 sattrs->srvalid = false; 10164 break; 10165 } else { 10166 if (env->sau.rlar[r] & 2) { 10167 sattrs->nsc = true; 10168 } else { 10169 sattrs->ns = true; 10170 } 10171 sattrs->srvalid = true; 10172 sattrs->sregion = r; 10173 } 10174 } else { 10175 /* 10176 * Address not in this region. We must check whether the 10177 * region covers addresses in the same page as our address. 10178 * In that case we must not report a size that covers the 10179 * whole page for a subsequent hit against a different MPU 10180 * region or the background region, because it would result 10181 * in incorrect TLB hits for subsequent accesses to 10182 * addresses that are in this MPU region. 10183 */ 10184 if (limit >= base && 10185 ranges_overlap(base, limit - base + 1, 10186 addr_page_base, 10187 TARGET_PAGE_SIZE)) { 10188 sattrs->subpage = true; 10189 } 10190 } 10191 } 10192 } 10193 10194 /* The IDAU will override the SAU lookup results if it specifies 10195 * higher security than the SAU does. 10196 */ 10197 if (!idau_ns) { 10198 if (sattrs->ns || (!idau_nsc && sattrs->nsc)) { 10199 sattrs->ns = false; 10200 sattrs->nsc = idau_nsc; 10201 } 10202 } 10203 break; 10204 } 10205 } 10206 10207 static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, 10208 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10209 hwaddr *phys_ptr, MemTxAttrs *txattrs, 10210 int *prot, bool *is_subpage, 10211 ARMMMUFaultInfo *fi, uint32_t *mregion) 10212 { 10213 /* Perform a PMSAv8 MPU lookup (without also doing the SAU check 10214 * that a full phys-to-virt translation does). 10215 * mregion is (if not NULL) set to the region number which matched, 10216 * or -1 if no region number is returned (MPU off, address did not 10217 * hit a region, address hit in multiple regions). 10218 * We set is_subpage to true if the region hit doesn't cover the 10219 * entire TARGET_PAGE the address is within. 10220 */ 10221 ARMCPU *cpu = arm_env_get_cpu(env); 10222 bool is_user = regime_is_user(env, mmu_idx); 10223 uint32_t secure = regime_is_secure(env, mmu_idx); 10224 int n; 10225 int matchregion = -1; 10226 bool hit = false; 10227 uint32_t addr_page_base = address & TARGET_PAGE_MASK; 10228 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 10229 10230 *is_subpage = false; 10231 *phys_ptr = address; 10232 *prot = 0; 10233 if (mregion) { 10234 *mregion = -1; 10235 } 10236 10237 /* Unlike the ARM ARM pseudocode, we don't need to check whether this 10238 * was an exception vector read from the vector table (which is always 10239 * done using the default system address map), because those accesses 10240 * are done in arm_v7m_load_vector(), which always does a direct 10241 * read using address_space_ldl(), rather than going via this function. 10242 */ 10243 if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */ 10244 hit = true; 10245 } else if (m_is_ppb_region(env, address)) { 10246 hit = true; 10247 } else if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) { 10248 hit = true; 10249 } else { 10250 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { 10251 /* region search */ 10252 /* Note that the base address is bits [31:5] from the register 10253 * with bits [4:0] all zeroes, but the limit address is bits 10254 * [31:5] from the register with bits [4:0] all ones. 
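* For example, an RBAR value of 0x20000000 with an RLAR value of
* 0x20007fe1 (enable bit set) describes the region 0x20000000..0x20007fff.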
10255 */ 10256 uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f; 10257 uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f; 10258 10259 if (!(env->pmsav8.rlar[secure][n] & 0x1)) { 10260 /* Region disabled */ 10261 continue; 10262 } 10263 10264 if (address < base || address > limit) { 10265 /* 10266 * Address not in this region. We must check whether the 10267 * region covers addresses in the same page as our address. 10268 * In that case we must not report a size that covers the 10269 * whole page for a subsequent hit against a different MPU 10270 * region or the background region, because it would result in 10271 * incorrect TLB hits for subsequent accesses to addresses that 10272 * are in this MPU region. 10273 */ 10274 if (limit >= base && 10275 ranges_overlap(base, limit - base + 1, 10276 addr_page_base, 10277 TARGET_PAGE_SIZE)) { 10278 *is_subpage = true; 10279 } 10280 continue; 10281 } 10282 10283 if (base > addr_page_base || limit < addr_page_limit) { 10284 *is_subpage = true; 10285 } 10286 10287 if (hit) { 10288 /* Multiple regions match -- always a failure (unlike 10289 * PMSAv7 where highest-numbered-region wins) 10290 */ 10291 fi->type = ARMFault_Permission; 10292 fi->level = 1; 10293 return true; 10294 } 10295 10296 matchregion = n; 10297 hit = true; 10298 } 10299 } 10300 10301 if (!hit) { 10302 /* background fault */ 10303 fi->type = ARMFault_Background; 10304 return true; 10305 } 10306 10307 if (matchregion == -1) { 10308 /* hit using the background region */ 10309 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 10310 } else { 10311 uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2); 10312 uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1); 10313 10314 if (m_is_system_region(env, address)) { 10315 /* System space is always execute never */ 10316 xn = 1; 10317 } 10318 10319 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap); 10320 if (*prot && !xn) { 10321 *prot |= PAGE_EXEC; 10322 } 10323 /* We don't need to look the attribute up in the MAIR0/MAIR1 10324 * registers because that only tells us about cacheability. 10325 */ 10326 if (mregion) { 10327 *mregion = matchregion; 10328 } 10329 } 10330 10331 fi->type = ARMFault_Permission; 10332 fi->level = 1; 10333 return !(*prot & (1 << access_type)); 10334 } 10335 10336 10337 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, 10338 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10339 hwaddr *phys_ptr, MemTxAttrs *txattrs, 10340 int *prot, target_ulong *page_size, 10341 ARMMMUFaultInfo *fi) 10342 { 10343 uint32_t secure = regime_is_secure(env, mmu_idx); 10344 V8M_SAttributes sattrs = {}; 10345 bool ret; 10346 bool mpu_is_subpage; 10347 10348 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 10349 v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs); 10350 if (access_type == MMU_INST_FETCH) { 10351 /* Instruction fetches always use the MMU bank and the 10352 * transaction attribute determined by the fetch address, 10353 * regardless of CPU state. This is painful for QEMU 10354 * to handle, because it would mean we need to encode 10355 * into the mmu_idx not just the (user, negpri) information 10356 * for the current security state but also that for the 10357 * other security state, which would balloon the number 10358 * of mmu_idx values needed alarmingly. 
10359 * Fortunately we can avoid this because it's not actually 10360 * possible to arbitrarily execute code from memory with 10361 * the wrong security attribute: it will always generate 10362 * an exception of some kind or another, apart from the 10363 * special case of an NS CPU executing an SG instruction 10364 * in S&NSC memory. So we always just fail the translation 10365 * here and sort things out in the exception handler 10366 * (including possibly emulating an SG instruction). 10367 */ 10368 if (sattrs.ns != !secure) { 10369 if (sattrs.nsc) { 10370 fi->type = ARMFault_QEMU_NSCExec; 10371 } else { 10372 fi->type = ARMFault_QEMU_SFault; 10373 } 10374 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; 10375 *phys_ptr = address; 10376 *prot = 0; 10377 return true; 10378 } 10379 } else { 10380 /* For data accesses we always use the MMU bank indicated 10381 * by the current CPU state, but the security attributes 10382 * might downgrade a secure access to nonsecure. 10383 */ 10384 if (sattrs.ns) { 10385 txattrs->secure = false; 10386 } else if (!secure) { 10387 /* NS access to S memory must fault. 10388 * Architecturally we should first check whether the 10389 * MPU information for this address indicates that we 10390 * are doing an unaligned access to Device memory, which 10391 * should generate a UsageFault instead. QEMU does not 10392 * currently check for that kind of unaligned access though. 10393 * If we added it we would need to do so as a special case 10394 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt(). 10395 */ 10396 fi->type = ARMFault_QEMU_SFault; 10397 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; 10398 *phys_ptr = address; 10399 *prot = 0; 10400 return true; 10401 } 10402 } 10403 } 10404 10405 ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr, 10406 txattrs, prot, &mpu_is_subpage, fi, NULL); 10407 /* 10408 * TODO: this is a temporary hack to ignore the fact that the SAU region 10409 * is smaller than a page if this is an executable region. We never 10410 * supported small MPU regions, but we did (accidentally) allow small 10411 * SAU regions, and if we now made small SAU regions not be executable 10412 * then this would break previously working guest code. We can't 10413 * remove this until/unless we implement support for execution from 10414 * small regions. 10415 */ 10416 if (*prot & PAGE_EXEC) { 10417 sattrs.subpage = false; 10418 } 10419 *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE; 10420 return ret; 10421 } 10422 10423 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address, 10424 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10425 hwaddr *phys_ptr, int *prot, 10426 ARMMMUFaultInfo *fi) 10427 { 10428 int n; 10429 uint32_t mask; 10430 uint32_t base; 10431 bool is_user = regime_is_user(env, mmu_idx); 10432 10433 if (regime_translation_disabled(env, mmu_idx)) { 10434 /* MPU disabled. */ 10435 *phys_ptr = address; 10436 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 10437 return false; 10438 } 10439 10440 *phys_ptr = address; 10441 for (n = 7; n >= 0; n--) { 10442 base = env->cp15.c6_region[n]; 10443 if ((base & 1) == 0) { 10444 continue; 10445 } 10446 mask = 1 << ((base >> 1) & 0x1f); 10447 /* Keep this shift separate from the above to avoid an 10448 (undefined) << 32. 
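(The region size is 2^(N+1) bytes, where N is bits [5:1] of the
region register, so N == 31 would otherwise require a 1 << 32.)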
*/ 10449 mask = (mask << 1) - 1; 10450 if (((base ^ address) & ~mask) == 0) { 10451 break; 10452 } 10453 } 10454 if (n < 0) { 10455 fi->type = ARMFault_Background; 10456 return true; 10457 } 10458 10459 if (access_type == MMU_INST_FETCH) { 10460 mask = env->cp15.pmsav5_insn_ap; 10461 } else { 10462 mask = env->cp15.pmsav5_data_ap; 10463 } 10464 mask = (mask >> (n * 4)) & 0xf; 10465 switch (mask) { 10466 case 0: 10467 fi->type = ARMFault_Permission; 10468 fi->level = 1; 10469 return true; 10470 case 1: 10471 if (is_user) { 10472 fi->type = ARMFault_Permission; 10473 fi->level = 1; 10474 return true; 10475 } 10476 *prot = PAGE_READ | PAGE_WRITE; 10477 break; 10478 case 2: 10479 *prot = PAGE_READ; 10480 if (!is_user) { 10481 *prot |= PAGE_WRITE; 10482 } 10483 break; 10484 case 3: 10485 *prot = PAGE_READ | PAGE_WRITE; 10486 break; 10487 case 5: 10488 if (is_user) { 10489 fi->type = ARMFault_Permission; 10490 fi->level = 1; 10491 return true; 10492 } 10493 *prot = PAGE_READ; 10494 break; 10495 case 6: 10496 *prot = PAGE_READ; 10497 break; 10498 default: 10499 /* Bad permission. */ 10500 fi->type = ARMFault_Permission; 10501 fi->level = 1; 10502 return true; 10503 } 10504 *prot |= PAGE_EXEC; 10505 return false; 10506 } 10507 10508 /* Combine either inner or outer cacheability attributes for normal 10509 * memory, according to table D4-42 and pseudocode procedure 10510 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM). 10511 * 10512 * NB: only stage 1 includes allocation hints (RW bits), leading to 10513 * some asymmetry. 10514 */ 10515 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2) 10516 { 10517 if (s1 == 4 || s2 == 4) { 10518 /* non-cacheable has precedence */ 10519 return 4; 10520 } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) { 10521 /* stage 1 write-through takes precedence */ 10522 return s1; 10523 } else if (extract32(s2, 2, 2) == 2) { 10524 /* stage 2 write-through takes precedence, but the allocation hint 10525 * is still taken from stage 1 10526 */ 10527 return (2 << 2) | extract32(s1, 0, 2); 10528 } else { /* write-back */ 10529 return s1; 10530 } 10531 } 10532 10533 /* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4 10534 * and CombineS1S2Desc() 10535 * 10536 * @s1: Attributes from stage 1 walk 10537 * @s2: Attributes from stage 2 walk 10538 */ 10539 static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2) 10540 { 10541 uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4); 10542 uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4); 10543 ARMCacheAttrs ret; 10544 10545 /* Combine shareability attributes (table D4-43) */ 10546 if (s1.shareability == 2 || s2.shareability == 2) { 10547 /* if either are outer-shareable, the result is outer-shareable */ 10548 ret.shareability = 2; 10549 } else if (s1.shareability == 3 || s2.shareability == 3) { 10550 /* if either are inner-shareable, the result is inner-shareable */ 10551 ret.shareability = 3; 10552 } else { 10553 /* both non-shareable */ 10554 ret.shareability = 0; 10555 } 10556 10557 /* Combine memory type and cacheability attributes */ 10558 if (s1hi == 0 || s2hi == 0) { 10559 /* Device has precedence over normal */ 10560 if (s1lo == 0 || s2lo == 0) { 10561 /* nGnRnE has precedence over anything */ 10562 ret.attrs = 0; 10563 } else if (s1lo == 4 || s2lo == 4) { 10564 /* non-Reordering has precedence over Reordering */ 10565 ret.attrs = 4; /* nGnRE */ 10566 } else if (s1lo == 8 || s2lo == 8) { 10567 /* 
non-Gathering has precedence over Gathering */ 10568 ret.attrs = 8; /* nGRE */ 10569 } else { 10570 ret.attrs = 0xc; /* GRE */ 10571 } 10572 10573 /* Any location for which the resultant memory type is any 10574 * type of Device memory is always treated as Outer Shareable. 10575 */ 10576 ret.shareability = 2; 10577 } else { /* Normal memory */ 10578 /* Outer/inner cacheability combine independently */ 10579 ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4 10580 | combine_cacheattr_nibble(s1lo, s2lo); 10581 10582 if (ret.attrs == 0x44) { 10583 /* Any location for which the resultant memory type is Normal 10584 * Inner Non-cacheable, Outer Non-cacheable is always treated 10585 * as Outer Shareable. 10586 */ 10587 ret.shareability = 2; 10588 } 10589 } 10590 10591 return ret; 10592 } 10593 10594 10595 /* get_phys_addr - get the physical address for this virtual address 10596 * 10597 * Find the physical address corresponding to the given virtual address, 10598 * by doing a translation table walk on MMU based systems or using the 10599 * MPU state on MPU based systems. 10600 * 10601 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs, 10602 * prot and page_size may not be filled in, and the populated fsr value provides 10603 * information on why the translation aborted, in the format of a 10604 * DFSR/IFSR fault register, with the following caveats: 10605 * * we honour the short vs long DFSR format differences. 10606 * * the WnR bit is never set (the caller must do this). 10607 * * for PSMAv5 based systems we don't bother to return a full FSR format 10608 * value. 10609 * 10610 * @env: CPUARMState 10611 * @address: virtual address to get physical address for 10612 * @access_type: 0 for read, 1 for write, 2 for execute 10613 * @mmu_idx: MMU index indicating required translation regime 10614 * @phys_ptr: set to the physical address corresponding to the virtual address 10615 * @attrs: set to the memory transaction attributes to use 10616 * @prot: set to the permissions for the page containing phys_ptr 10617 * @page_size: set to the size of the page containing phys_ptr 10618 * @fi: set to fault info if the translation fails 10619 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes 10620 */ 10621 static bool get_phys_addr(CPUARMState *env, target_ulong address, 10622 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10623 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 10624 target_ulong *page_size, 10625 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) 10626 { 10627 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) { 10628 /* Call ourselves recursively to do the stage 1 and then stage 2 10629 * translations. 10630 */ 10631 if (arm_feature(env, ARM_FEATURE_EL2)) { 10632 hwaddr ipa; 10633 int s2_prot; 10634 int ret; 10635 ARMCacheAttrs cacheattrs2 = {}; 10636 10637 ret = get_phys_addr(env, address, access_type, 10638 stage_1_mmu_idx(mmu_idx), &ipa, attrs, 10639 prot, page_size, fi, cacheattrs); 10640 10641 /* If S1 fails or S2 is disabled, return early. */ 10642 if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) { 10643 *phys_ptr = ipa; 10644 return ret; 10645 } 10646 10647 /* S1 is done. Now do S2 translation. */ 10648 ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS, 10649 phys_ptr, attrs, &s2_prot, 10650 page_size, fi, 10651 cacheattrs != NULL ? &cacheattrs2 : NULL); 10652 fi->s2addr = ipa; 10653 /* Combine the S1 and S2 perms. 
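(an access is permitted only if both stages permit it)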
*/ 10654 *prot &= s2_prot; 10655 10656 /* Combine the S1 and S2 cache attributes, if needed */ 10657 if (!ret && cacheattrs != NULL) { 10658 *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2); 10659 } 10660 10661 return ret; 10662 } else { 10663 /* 10664 * For non-EL2 CPUs a stage1+stage2 translation is just stage 1. 10665 */ 10666 mmu_idx = stage_1_mmu_idx(mmu_idx); 10667 } 10668 } 10669 10670 /* The page table entries may downgrade secure to non-secure, but 10671 * cannot upgrade an non-secure translation regime's attributes 10672 * to secure. 10673 */ 10674 attrs->secure = regime_is_secure(env, mmu_idx); 10675 attrs->user = regime_is_user(env, mmu_idx); 10676 10677 /* Fast Context Switch Extension. This doesn't exist at all in v8. 10678 * In v7 and earlier it affects all stage 1 translations. 10679 */ 10680 if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS 10681 && !arm_feature(env, ARM_FEATURE_V8)) { 10682 if (regime_el(env, mmu_idx) == 3) { 10683 address += env->cp15.fcseidr_s; 10684 } else { 10685 address += env->cp15.fcseidr_ns; 10686 } 10687 } 10688 10689 if (arm_feature(env, ARM_FEATURE_PMSA)) { 10690 bool ret; 10691 *page_size = TARGET_PAGE_SIZE; 10692 10693 if (arm_feature(env, ARM_FEATURE_V8)) { 10694 /* PMSAv8 */ 10695 ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx, 10696 phys_ptr, attrs, prot, page_size, fi); 10697 } else if (arm_feature(env, ARM_FEATURE_V7)) { 10698 /* PMSAv7 */ 10699 ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx, 10700 phys_ptr, prot, page_size, fi); 10701 } else { 10702 /* Pre-v7 MPU */ 10703 ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx, 10704 phys_ptr, prot, fi); 10705 } 10706 qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32 10707 " mmu_idx %u -> %s (prot %c%c%c)\n", 10708 access_type == MMU_DATA_LOAD ? "reading" : 10709 (access_type == MMU_DATA_STORE ? "writing" : "execute"), 10710 (uint32_t)address, mmu_idx, 10711 ret ? "Miss" : "Hit", 10712 *prot & PAGE_READ ? 'r' : '-', 10713 *prot & PAGE_WRITE ? 'w' : '-', 10714 *prot & PAGE_EXEC ? 'x' : '-'); 10715 10716 return ret; 10717 } 10718 10719 /* Definitely a real MMU, not an MPU */ 10720 10721 if (regime_translation_disabled(env, mmu_idx)) { 10722 /* MMU disabled. */ 10723 *phys_ptr = address; 10724 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 10725 *page_size = TARGET_PAGE_SIZE; 10726 return 0; 10727 } 10728 10729 if (regime_using_lpae_format(env, mmu_idx)) { 10730 return get_phys_addr_lpae(env, address, access_type, mmu_idx, 10731 phys_ptr, attrs, prot, page_size, 10732 fi, cacheattrs); 10733 } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) { 10734 return get_phys_addr_v6(env, address, access_type, mmu_idx, 10735 phys_ptr, attrs, prot, page_size, fi); 10736 } else { 10737 return get_phys_addr_v5(env, address, access_type, mmu_idx, 10738 phys_ptr, prot, page_size, fi); 10739 } 10740 } 10741 10742 /* Walk the page table and (if the mapping exists) add the page 10743 * to the TLB. Return false on success, or true on failure. Populate 10744 * fsr with ARM DFSR/IFSR fault register format value on failure. 
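* (The fault details are in fact returned via the ARMMMUFaultInfo
* argument below rather than as a raw FSR value.)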
10745 */ 10746 bool arm_tlb_fill(CPUState *cs, vaddr address, 10747 MMUAccessType access_type, int mmu_idx, 10748 ARMMMUFaultInfo *fi) 10749 { 10750 ARMCPU *cpu = ARM_CPU(cs); 10751 CPUARMState *env = &cpu->env; 10752 hwaddr phys_addr; 10753 target_ulong page_size; 10754 int prot; 10755 int ret; 10756 MemTxAttrs attrs = {}; 10757 10758 ret = get_phys_addr(env, address, access_type, 10759 core_to_arm_mmu_idx(env, mmu_idx), &phys_addr, 10760 &attrs, &prot, &page_size, fi, NULL); 10761 if (!ret) { 10762 /* 10763 * Map a single [sub]page. Regions smaller than our declared 10764 * target page size are handled specially, so for those we 10765 * pass in the exact addresses. 10766 */ 10767 if (page_size >= TARGET_PAGE_SIZE) { 10768 phys_addr &= TARGET_PAGE_MASK; 10769 address &= TARGET_PAGE_MASK; 10770 } 10771 tlb_set_page_with_attrs(cs, address, phys_addr, attrs, 10772 prot, mmu_idx, page_size); 10773 return 0; 10774 } 10775 10776 return ret; 10777 } 10778 10779 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, 10780 MemTxAttrs *attrs) 10781 { 10782 ARMCPU *cpu = ARM_CPU(cs); 10783 CPUARMState *env = &cpu->env; 10784 hwaddr phys_addr; 10785 target_ulong page_size; 10786 int prot; 10787 bool ret; 10788 ARMMMUFaultInfo fi = {}; 10789 ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false)); 10790 10791 *attrs = (MemTxAttrs) {}; 10792 10793 ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr, 10794 attrs, &prot, &page_size, &fi, NULL); 10795 10796 if (ret) { 10797 return -1; 10798 } 10799 return phys_addr; 10800 } 10801 10802 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg) 10803 { 10804 uint32_t mask; 10805 unsigned el = arm_current_el(env); 10806 10807 /* First handle registers which unprivileged can read */ 10808 10809 switch (reg) { 10810 case 0 ... 7: /* xPSR sub-fields */ 10811 mask = 0; 10812 if ((reg & 1) && el) { 10813 mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */ 10814 } 10815 if (!(reg & 4)) { 10816 mask |= XPSR_NZCV | XPSR_Q; /* APSR */ 10817 } 10818 /* EPSR reads as zero */ 10819 return xpsr_read(env) & mask; 10820 break; 10821 case 20: /* CONTROL */ 10822 return env->v7m.control[env->v7m.secure]; 10823 case 0x94: /* CONTROL_NS */ 10824 /* We have to handle this here because unprivileged Secure code 10825 * can read the NS CONTROL register. 
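* (That is, before the "el == 0" check below, which would otherwise
* make it read as zero.)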
10826 */ 10827 if (!env->v7m.secure) { 10828 return 0; 10829 } 10830 return env->v7m.control[M_REG_NS]; 10831 } 10832 10833 if (el == 0) { 10834 return 0; /* unprivileged reads others as zero */ 10835 } 10836 10837 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 10838 switch (reg) { 10839 case 0x88: /* MSP_NS */ 10840 if (!env->v7m.secure) { 10841 return 0; 10842 } 10843 return env->v7m.other_ss_msp; 10844 case 0x89: /* PSP_NS */ 10845 if (!env->v7m.secure) { 10846 return 0; 10847 } 10848 return env->v7m.other_ss_psp; 10849 case 0x8a: /* MSPLIM_NS */ 10850 if (!env->v7m.secure) { 10851 return 0; 10852 } 10853 return env->v7m.msplim[M_REG_NS]; 10854 case 0x8b: /* PSPLIM_NS */ 10855 if (!env->v7m.secure) { 10856 return 0; 10857 } 10858 return env->v7m.psplim[M_REG_NS]; 10859 case 0x90: /* PRIMASK_NS */ 10860 if (!env->v7m.secure) { 10861 return 0; 10862 } 10863 return env->v7m.primask[M_REG_NS]; 10864 case 0x91: /* BASEPRI_NS */ 10865 if (!env->v7m.secure) { 10866 return 0; 10867 } 10868 return env->v7m.basepri[M_REG_NS]; 10869 case 0x93: /* FAULTMASK_NS */ 10870 if (!env->v7m.secure) { 10871 return 0; 10872 } 10873 return env->v7m.faultmask[M_REG_NS]; 10874 case 0x98: /* SP_NS */ 10875 { 10876 /* This gives the non-secure SP selected based on whether we're 10877 * currently in handler mode or not, using the NS CONTROL.SPSEL. 10878 */ 10879 bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK; 10880 10881 if (!env->v7m.secure) { 10882 return 0; 10883 } 10884 if (!arm_v7m_is_handler_mode(env) && spsel) { 10885 return env->v7m.other_ss_psp; 10886 } else { 10887 return env->v7m.other_ss_msp; 10888 } 10889 } 10890 default: 10891 break; 10892 } 10893 } 10894 10895 switch (reg) { 10896 case 8: /* MSP */ 10897 return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13]; 10898 case 9: /* PSP */ 10899 return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp; 10900 case 10: /* MSPLIM */ 10901 if (!arm_feature(env, ARM_FEATURE_V8)) { 10902 goto bad_reg; 10903 } 10904 return env->v7m.msplim[env->v7m.secure]; 10905 case 11: /* PSPLIM */ 10906 if (!arm_feature(env, ARM_FEATURE_V8)) { 10907 goto bad_reg; 10908 } 10909 return env->v7m.psplim[env->v7m.secure]; 10910 case 16: /* PRIMASK */ 10911 return env->v7m.primask[env->v7m.secure]; 10912 case 17: /* BASEPRI */ 10913 case 18: /* BASEPRI_MAX */ 10914 return env->v7m.basepri[env->v7m.secure]; 10915 case 19: /* FAULTMASK */ 10916 return env->v7m.faultmask[env->v7m.secure]; 10917 default: 10918 bad_reg: 10919 qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special" 10920 " register %d\n", reg); 10921 return 0; 10922 } 10923 } 10924 10925 void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val) 10926 { 10927 /* We're passed bits [11..0] of the instruction; extract 10928 * SYSm and the mask bits. 10929 * Invalid combinations of SYSm and mask are UNPREDICTABLE; 10930 * we choose to treat them as if the mask bits were valid. 10931 * NB that the pseudocode 'mask' variable is bits [11..10], 10932 * whereas ours is [11..8]. 
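* (For instance the pseudocode mask<1> bit is tested below as mask & 8.)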
10933 */ 10934 uint32_t mask = extract32(maskreg, 8, 4); 10935 uint32_t reg = extract32(maskreg, 0, 8); 10936 10937 if (arm_current_el(env) == 0 && reg > 7) { 10938 /* only xPSR sub-fields may be written by unprivileged */ 10939 return; 10940 } 10941 10942 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 10943 switch (reg) { 10944 case 0x88: /* MSP_NS */ 10945 if (!env->v7m.secure) { 10946 return; 10947 } 10948 env->v7m.other_ss_msp = val; 10949 return; 10950 case 0x89: /* PSP_NS */ 10951 if (!env->v7m.secure) { 10952 return; 10953 } 10954 env->v7m.other_ss_psp = val; 10955 return; 10956 case 0x8a: /* MSPLIM_NS */ 10957 if (!env->v7m.secure) { 10958 return; 10959 } 10960 env->v7m.msplim[M_REG_NS] = val & ~7; 10961 return; 10962 case 0x8b: /* PSPLIM_NS */ 10963 if (!env->v7m.secure) { 10964 return; 10965 } 10966 env->v7m.psplim[M_REG_NS] = val & ~7; 10967 return; 10968 case 0x90: /* PRIMASK_NS */ 10969 if (!env->v7m.secure) { 10970 return; 10971 } 10972 env->v7m.primask[M_REG_NS] = val & 1; 10973 return; 10974 case 0x91: /* BASEPRI_NS */ 10975 if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) { 10976 return; 10977 } 10978 env->v7m.basepri[M_REG_NS] = val & 0xff; 10979 return; 10980 case 0x93: /* FAULTMASK_NS */ 10981 if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) { 10982 return; 10983 } 10984 env->v7m.faultmask[M_REG_NS] = val & 1; 10985 return; 10986 case 0x94: /* CONTROL_NS */ 10987 if (!env->v7m.secure) { 10988 return; 10989 } 10990 write_v7m_control_spsel_for_secstate(env, 10991 val & R_V7M_CONTROL_SPSEL_MASK, 10992 M_REG_NS); 10993 if (arm_feature(env, ARM_FEATURE_M_MAIN)) { 10994 env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK; 10995 env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK; 10996 } 10997 return; 10998 case 0x98: /* SP_NS */ 10999 { 11000 /* This gives the non-secure SP selected based on whether we're 11001 * currently in handler mode or not, using the NS CONTROL.SPSEL. 11002 */ 11003 bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK; 11004 bool is_psp = !arm_v7m_is_handler_mode(env) && spsel; 11005 uint32_t limit; 11006 11007 if (!env->v7m.secure) { 11008 return; 11009 } 11010 11011 limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false]; 11012 11013 if (val < limit) { 11014 CPUState *cs = CPU(arm_env_get_cpu(env)); 11015 11016 cpu_restore_state(cs, GETPC(), true); 11017 raise_exception(env, EXCP_STKOF, 0, 1); 11018 } 11019 11020 if (is_psp) { 11021 env->v7m.other_ss_psp = val; 11022 } else { 11023 env->v7m.other_ss_msp = val; 11024 } 11025 return; 11026 } 11027 default: 11028 break; 11029 } 11030 } 11031 11032 switch (reg) { 11033 case 0 ... 
7: /* xPSR sub-fields */ 11034 /* only APSR is actually writable */ 11035 if (!(reg & 4)) { 11036 uint32_t apsrmask = 0; 11037 11038 if (mask & 8) { 11039 apsrmask |= XPSR_NZCV | XPSR_Q; 11040 } 11041 if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) { 11042 apsrmask |= XPSR_GE; 11043 } 11044 xpsr_write(env, val, apsrmask); 11045 } 11046 break; 11047 case 8: /* MSP */ 11048 if (v7m_using_psp(env)) { 11049 env->v7m.other_sp = val; 11050 } else { 11051 env->regs[13] = val; 11052 } 11053 break; 11054 case 9: /* PSP */ 11055 if (v7m_using_psp(env)) { 11056 env->regs[13] = val; 11057 } else { 11058 env->v7m.other_sp = val; 11059 } 11060 break; 11061 case 10: /* MSPLIM */ 11062 if (!arm_feature(env, ARM_FEATURE_V8)) { 11063 goto bad_reg; 11064 } 11065 env->v7m.msplim[env->v7m.secure] = val & ~7; 11066 break; 11067 case 11: /* PSPLIM */ 11068 if (!arm_feature(env, ARM_FEATURE_V8)) { 11069 goto bad_reg; 11070 } 11071 env->v7m.psplim[env->v7m.secure] = val & ~7; 11072 break; 11073 case 16: /* PRIMASK */ 11074 env->v7m.primask[env->v7m.secure] = val & 1; 11075 break; 11076 case 17: /* BASEPRI */ 11077 if (!arm_feature(env, ARM_FEATURE_M_MAIN)) { 11078 goto bad_reg; 11079 } 11080 env->v7m.basepri[env->v7m.secure] = val & 0xff; 11081 break; 11082 case 18: /* BASEPRI_MAX */ 11083 if (!arm_feature(env, ARM_FEATURE_M_MAIN)) { 11084 goto bad_reg; 11085 } 11086 val &= 0xff; 11087 if (val != 0 && (val < env->v7m.basepri[env->v7m.secure] 11088 || env->v7m.basepri[env->v7m.secure] == 0)) { 11089 env->v7m.basepri[env->v7m.secure] = val; 11090 } 11091 break; 11092 case 19: /* FAULTMASK */ 11093 if (!arm_feature(env, ARM_FEATURE_M_MAIN)) { 11094 goto bad_reg; 11095 } 11096 env->v7m.faultmask[env->v7m.secure] = val & 1; 11097 break; 11098 case 20: /* CONTROL */ 11099 /* Writing to the SPSEL bit only has an effect if we are in 11100 * thread mode; other bits can be updated by any privileged code. 11101 * write_v7m_control_spsel() deals with updating the SPSEL bit in 11102 * env->v7m.control, so we only need update the others. 11103 * For v7M, we must just ignore explicit writes to SPSEL in handler 11104 * mode; for v8M the write is permitted but will have no effect. 11105 */ 11106 if (arm_feature(env, ARM_FEATURE_V8) || 11107 !arm_v7m_is_handler_mode(env)) { 11108 write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0); 11109 } 11110 if (arm_feature(env, ARM_FEATURE_M_MAIN)) { 11111 env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK; 11112 env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK; 11113 } 11114 break; 11115 default: 11116 bad_reg: 11117 qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special" 11118 " register %d\n", reg); 11119 return; 11120 } 11121 } 11122 11123 uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) 11124 { 11125 /* Implement the TT instruction. op is bits [7:6] of the insn. */ 11126 bool forceunpriv = op & 1; 11127 bool alt = op & 2; 11128 V8M_SAttributes sattrs = {}; 11129 uint32_t tt_resp; 11130 bool r, rw, nsr, nsrw, mrvalid; 11131 int prot; 11132 ARMMMUFaultInfo fi = {}; 11133 MemTxAttrs attrs = {}; 11134 hwaddr phys_addr; 11135 ARMMMUIdx mmu_idx; 11136 uint32_t mregion; 11137 bool targetpriv; 11138 bool targetsec = env->v7m.secure; 11139 bool is_subpage; 11140 11141 /* Work out what the security state and privilege level we're 11142 * interested in is... 
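* (op bit 0 forces an unprivileged lookup, as for TTT/TTAT; op bit 1
* selects the other security state, as for TTA/TTAT.)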
11143 */ 11144 if (alt) { 11145 targetsec = !targetsec; 11146 } 11147 11148 if (forceunpriv) { 11149 targetpriv = false; 11150 } else { 11151 targetpriv = arm_v7m_is_handler_mode(env) || 11152 !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK); 11153 } 11154 11155 /* ...and then figure out which MMU index this is */ 11156 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv); 11157 11158 /* We know that the MPU and SAU don't care about the access type 11159 * for our purposes beyond that we don't want to claim to be 11160 * an insn fetch, so we arbitrarily call this a read. 11161 */ 11162 11163 /* MPU region info only available for privileged or if 11164 * inspecting the other MPU state. 11165 */ 11166 if (arm_current_el(env) != 0 || alt) { 11167 /* We can ignore the return value as prot is always set */ 11168 pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, 11169 &phys_addr, &attrs, &prot, &is_subpage, 11170 &fi, &mregion); 11171 if (mregion == -1) { 11172 mrvalid = false; 11173 mregion = 0; 11174 } else { 11175 mrvalid = true; 11176 } 11177 r = prot & PAGE_READ; 11178 rw = prot & PAGE_WRITE; 11179 } else { 11180 r = false; 11181 rw = false; 11182 mrvalid = false; 11183 mregion = 0; 11184 } 11185 11186 if (env->v7m.secure) { 11187 v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs); 11188 nsr = sattrs.ns && r; 11189 nsrw = sattrs.ns && rw; 11190 } else { 11191 sattrs.ns = true; 11192 nsr = false; 11193 nsrw = false; 11194 } 11195 11196 tt_resp = (sattrs.iregion << 24) | 11197 (sattrs.irvalid << 23) | 11198 ((!sattrs.ns) << 22) | 11199 (nsrw << 21) | 11200 (nsr << 20) | 11201 (rw << 19) | 11202 (r << 18) | 11203 (sattrs.srvalid << 17) | 11204 (mrvalid << 16) | 11205 (sattrs.sregion << 8) | 11206 mregion; 11207 11208 return tt_resp; 11209 } 11210 11211 #endif 11212 11213 void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in) 11214 { 11215 /* Implement DC ZVA, which zeroes a fixed-length block of memory. 11216 * Note that we do not implement the (architecturally mandated) 11217 * alignment fault for attempts to use this on Device memory 11218 * (which matches the usual QEMU behaviour of not implementing either 11219 * alignment faults or any memory attribute handling). 11220 */ 11221 11222 ARMCPU *cpu = arm_env_get_cpu(env); 11223 uint64_t blocklen = 4 << cpu->dcz_blocksize; 11224 uint64_t vaddr = vaddr_in & ~(blocklen - 1); 11225 11226 #ifndef CONFIG_USER_ONLY 11227 { 11228 /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than 11229 * the block size so we might have to do more than one TLB lookup. 11230 * We know that in fact for any v8 CPU the page size is at least 4K 11231 * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only 11232 * 1K as an artefact of legacy v5 subpage support being present in the 11233 * same QEMU executable. 11234 */ 11235 int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE); 11236 void *hostaddr[maxidx]; 11237 int try, i; 11238 unsigned mmu_idx = cpu_mmu_index(env, false); 11239 TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); 11240 11241 for (try = 0; try < 2; try++) { 11242 11243 for (i = 0; i < maxidx; i++) { 11244 hostaddr[i] = tlb_vaddr_to_host(env, 11245 vaddr + TARGET_PAGE_SIZE * i, 11246 1, mmu_idx); 11247 if (!hostaddr[i]) { 11248 break; 11249 } 11250 } 11251 if (i == maxidx) { 11252 /* If it's all in the TLB it's fair game for just writing to; 11253 * we know we don't need to update dirty status, etc. 
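* The loop below clears whole pages; the final memset covers whatever
* remains of the block within the last page.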
11254 */ 11255 for (i = 0; i < maxidx - 1; i++) { 11256 memset(hostaddr[i], 0, TARGET_PAGE_SIZE); 11257 } 11258 memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE)); 11259 return; 11260 } 11261 /* OK, try a store and see if we can populate the tlb. This 11262 * might cause an exception if the memory isn't writable, 11263 * in which case we will longjmp out of here. We must for 11264 * this purpose use the actual register value passed to us 11265 * so that we get the fault address right. 11266 */ 11267 helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC()); 11268 /* Now we can populate the other TLB entries, if any */ 11269 for (i = 0; i < maxidx; i++) { 11270 uint64_t va = vaddr + TARGET_PAGE_SIZE * i; 11271 if (va != (vaddr_in & TARGET_PAGE_MASK)) { 11272 helper_ret_stb_mmu(env, va, 0, oi, GETPC()); 11273 } 11274 } 11275 } 11276 11277 /* Slow path (probably attempt to do this to an I/O device or 11278 * similar, or clearing of a block of code we have translations 11279 * cached for). Just do a series of byte writes as the architecture 11280 * demands. It's not worth trying to use a cpu_physical_memory_map(), 11281 * memset(), unmap() sequence here because: 11282 * + we'd need to account for the blocksize being larger than a page 11283 * + the direct-RAM access case is almost always going to be dealt 11284 * with in the fastpath code above, so there's no speed benefit 11285 * + we would have to deal with the map returning NULL because the 11286 * bounce buffer was in use 11287 */ 11288 for (i = 0; i < blocklen; i++) { 11289 helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC()); 11290 } 11291 } 11292 #else 11293 memset(g2h(vaddr), 0, blocklen); 11294 #endif 11295 } 11296 11297 /* Note that signed overflow is undefined in C. The following routines are 11298 careful to use unsigned types where modulo arithmetic is required. 11299 Failure to do so _will_ break on newer gcc. */ 11300 11301 /* Signed saturating arithmetic. */ 11302 11303 /* Perform 16-bit signed saturating addition. */ 11304 static inline uint16_t add16_sat(uint16_t a, uint16_t b) 11305 { 11306 uint16_t res; 11307 11308 res = a + b; 11309 if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) { 11310 if (a & 0x8000) 11311 res = 0x8000; 11312 else 11313 res = 0x7fff; 11314 } 11315 return res; 11316 } 11317 11318 /* Perform 8-bit signed saturating addition. */ 11319 static inline uint8_t add8_sat(uint8_t a, uint8_t b) 11320 { 11321 uint8_t res; 11322 11323 res = a + b; 11324 if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) { 11325 if (a & 0x80) 11326 res = 0x80; 11327 else 11328 res = 0x7f; 11329 } 11330 return res; 11331 } 11332 11333 /* Perform 16-bit signed saturating subtraction. */ 11334 static inline uint16_t sub16_sat(uint16_t a, uint16_t b) 11335 { 11336 uint16_t res; 11337 11338 res = a - b; 11339 if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) { 11340 if (a & 0x8000) 11341 res = 0x8000; 11342 else 11343 res = 0x7fff; 11344 } 11345 return res; 11346 } 11347 11348 /* Perform 8-bit signed saturating subtraction. 
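For example, 0x80 - 0x01 saturates to 0x80 (INT8_MIN).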
*/ 11349 static inline uint8_t sub8_sat(uint8_t a, uint8_t b) 11350 { 11351 uint8_t res; 11352 11353 res = a - b; 11354 if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) { 11355 if (a & 0x80) 11356 res = 0x80; 11357 else 11358 res = 0x7f; 11359 } 11360 return res; 11361 } 11362 11363 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16); 11364 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16); 11365 #define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8); 11366 #define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8); 11367 #define PFX q 11368 11369 #include "op_addsub.h" 11370 11371 /* Unsigned saturating arithmetic. */ 11372 static inline uint16_t add16_usat(uint16_t a, uint16_t b) 11373 { 11374 uint16_t res; 11375 res = a + b; 11376 if (res < a) 11377 res = 0xffff; 11378 return res; 11379 } 11380 11381 static inline uint16_t sub16_usat(uint16_t a, uint16_t b) 11382 { 11383 if (a > b) 11384 return a - b; 11385 else 11386 return 0; 11387 } 11388 11389 static inline uint8_t add8_usat(uint8_t a, uint8_t b) 11390 { 11391 uint8_t res; 11392 res = a + b; 11393 if (res < a) 11394 res = 0xff; 11395 return res; 11396 } 11397 11398 static inline uint8_t sub8_usat(uint8_t a, uint8_t b) 11399 { 11400 if (a > b) 11401 return a - b; 11402 else 11403 return 0; 11404 } 11405 11406 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16); 11407 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16); 11408 #define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8); 11409 #define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8); 11410 #define PFX uq 11411 11412 #include "op_addsub.h" 11413 11414 /* Signed modulo arithmetic. */ 11415 #define SARITH16(a, b, n, op) do { \ 11416 int32_t sum; \ 11417 sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \ 11418 RESULT(sum, n, 16); \ 11419 if (sum >= 0) \ 11420 ge |= 3 << (n * 2); \ 11421 } while(0) 11422 11423 #define SARITH8(a, b, n, op) do { \ 11424 int32_t sum; \ 11425 sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \ 11426 RESULT(sum, n, 8); \ 11427 if (sum >= 0) \ 11428 ge |= 1 << n; \ 11429 } while(0) 11430 11431 11432 #define ADD16(a, b, n) SARITH16(a, b, n, +) 11433 #define SUB16(a, b, n) SARITH16(a, b, n, -) 11434 #define ADD8(a, b, n) SARITH8(a, b, n, +) 11435 #define SUB8(a, b, n) SARITH8(a, b, n, -) 11436 #define PFX s 11437 #define ARITH_GE 11438 11439 #include "op_addsub.h" 11440 11441 /* Unsigned modulo arithmetic. */ 11442 #define ADD16(a, b, n) do { \ 11443 uint32_t sum; \ 11444 sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \ 11445 RESULT(sum, n, 16); \ 11446 if ((sum >> 16) == 1) \ 11447 ge |= 3 << (n * 2); \ 11448 } while(0) 11449 11450 #define ADD8(a, b, n) do { \ 11451 uint32_t sum; \ 11452 sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \ 11453 RESULT(sum, n, 8); \ 11454 if ((sum >> 8) == 1) \ 11455 ge |= 1 << n; \ 11456 } while(0) 11457 11458 #define SUB16(a, b, n) do { \ 11459 uint32_t sum; \ 11460 sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \ 11461 RESULT(sum, n, 16); \ 11462 if ((sum >> 16) == 0) \ 11463 ge |= 3 << (n * 2); \ 11464 } while(0) 11465 11466 #define SUB8(a, b, n) do { \ 11467 uint32_t sum; \ 11468 sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \ 11469 RESULT(sum, n, 8); \ 11470 if ((sum >> 8) == 0) \ 11471 ge |= 1 << n; \ 11472 } while(0) 11473 11474 #define PFX u 11475 #define ARITH_GE 11476 11477 #include "op_addsub.h" 11478 11479 /* Halved signed arithmetic. 
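Each lane result is (a +/- b) >> 1 with no saturation; e.g. a byte
lane of 6 + 3 gives 4.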
*/ 11480 #define ADD16(a, b, n) \ 11481 RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16) 11482 #define SUB16(a, b, n) \ 11483 RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16) 11484 #define ADD8(a, b, n) \ 11485 RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8) 11486 #define SUB8(a, b, n) \ 11487 RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8) 11488 #define PFX sh 11489 11490 #include "op_addsub.h" 11491 11492 /* Halved unsigned arithmetic. */ 11493 #define ADD16(a, b, n) \ 11494 RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16) 11495 #define SUB16(a, b, n) \ 11496 RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16) 11497 #define ADD8(a, b, n) \ 11498 RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8) 11499 #define SUB8(a, b, n) \ 11500 RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8) 11501 #define PFX uh 11502 11503 #include "op_addsub.h" 11504 11505 static inline uint8_t do_usad(uint8_t a, uint8_t b) 11506 { 11507 if (a > b) 11508 return a - b; 11509 else 11510 return b - a; 11511 } 11512 11513 /* Unsigned sum of absolute byte differences. */ 11514 uint32_t HELPER(usad8)(uint32_t a, uint32_t b) 11515 { 11516 uint32_t sum; 11517 sum = do_usad(a, b); 11518 sum += do_usad(a >> 8, b >> 8); 11519 sum += do_usad(a >> 16, b >>16); 11520 sum += do_usad(a >> 24, b >> 24); 11521 return sum; 11522 } 11523 11524 /* For ARMv6 SEL instruction. */ 11525 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b) 11526 { 11527 uint32_t mask; 11528 11529 mask = 0; 11530 if (flags & 1) 11531 mask |= 0xff; 11532 if (flags & 2) 11533 mask |= 0xff00; 11534 if (flags & 4) 11535 mask |= 0xff0000; 11536 if (flags & 8) 11537 mask |= 0xff000000; 11538 return (a & mask) | (b & ~mask); 11539 } 11540 11541 /* VFP support. We follow the convention used for VFP instructions: 11542 Single precision routines have a "s" suffix, double precision a 11543 "d" suffix. */ 11544 11545 /* Convert host exception flags to vfp form. */ 11546 static inline int vfp_exceptbits_from_host(int host_bits) 11547 { 11548 int target_bits = 0; 11549 11550 if (host_bits & float_flag_invalid) 11551 target_bits |= 1; 11552 if (host_bits & float_flag_divbyzero) 11553 target_bits |= 2; 11554 if (host_bits & float_flag_overflow) 11555 target_bits |= 4; 11556 if (host_bits & (float_flag_underflow | float_flag_output_denormal)) 11557 target_bits |= 8; 11558 if (host_bits & float_flag_inexact) 11559 target_bits |= 0x10; 11560 if (host_bits & float_flag_input_denormal) 11561 target_bits |= 0x80; 11562 return target_bits; 11563 } 11564 11565 uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env) 11566 { 11567 int i; 11568 uint32_t fpscr; 11569 11570 fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff) 11571 | (env->vfp.vec_len << 16) 11572 | (env->vfp.vec_stride << 20); 11573 11574 i = get_float_exception_flags(&env->vfp.fp_status); 11575 i |= get_float_exception_flags(&env->vfp.standard_fp_status); 11576 /* FZ16 does not generate an input denormal exception. */ 11577 i |= (get_float_exception_flags(&env->vfp.fp_status_f16) 11578 & ~float_flag_input_denormal); 11579 11580 fpscr |= vfp_exceptbits_from_host(i); 11581 return fpscr; 11582 } 11583 11584 uint32_t vfp_get_fpscr(CPUARMState *env) 11585 { 11586 return HELPER(vfp_get_fpscr)(env); 11587 } 11588 11589 /* Convert vfp exception flags to target form. 
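(This maps the FPSCR cumulative exception flag bits back onto
softfloat's float_flag_* values.)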
*/ 11590 static inline int vfp_exceptbits_to_host(int target_bits) 11591 { 11592 int host_bits = 0; 11593 11594 if (target_bits & 1) 11595 host_bits |= float_flag_invalid; 11596 if (target_bits & 2) 11597 host_bits |= float_flag_divbyzero; 11598 if (target_bits & 4) 11599 host_bits |= float_flag_overflow; 11600 if (target_bits & 8) 11601 host_bits |= float_flag_underflow; 11602 if (target_bits & 0x10) 11603 host_bits |= float_flag_inexact; 11604 if (target_bits & 0x80) 11605 host_bits |= float_flag_input_denormal; 11606 return host_bits; 11607 } 11608 11609 void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val) 11610 { 11611 int i; 11612 uint32_t changed; 11613 11614 /* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */ 11615 if (!arm_feature(env, ARM_FEATURE_V8_FP16)) { 11616 val &= ~FPCR_FZ16; 11617 } 11618 11619 changed = env->vfp.xregs[ARM_VFP_FPSCR]; 11620 env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff); 11621 env->vfp.vec_len = (val >> 16) & 7; 11622 env->vfp.vec_stride = (val >> 20) & 3; 11623 11624 changed ^= val; 11625 if (changed & (3 << 22)) { 11626 i = (val >> 22) & 3; 11627 switch (i) { 11628 case FPROUNDING_TIEEVEN: 11629 i = float_round_nearest_even; 11630 break; 11631 case FPROUNDING_POSINF: 11632 i = float_round_up; 11633 break; 11634 case FPROUNDING_NEGINF: 11635 i = float_round_down; 11636 break; 11637 case FPROUNDING_ZERO: 11638 i = float_round_to_zero; 11639 break; 11640 } 11641 set_float_rounding_mode(i, &env->vfp.fp_status); 11642 set_float_rounding_mode(i, &env->vfp.fp_status_f16); 11643 } 11644 if (changed & FPCR_FZ16) { 11645 bool ftz_enabled = val & FPCR_FZ16; 11646 set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16); 11647 set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16); 11648 } 11649 if (changed & FPCR_FZ) { 11650 bool ftz_enabled = val & FPCR_FZ; 11651 set_flush_to_zero(ftz_enabled, &env->vfp.fp_status); 11652 set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status); 11653 } 11654 if (changed & FPCR_DN) { 11655 bool dnan_enabled = val & FPCR_DN; 11656 set_default_nan_mode(dnan_enabled, &env->vfp.fp_status); 11657 set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16); 11658 } 11659 11660 /* The exception flags are ORed together when we read fpscr so we 11661 * only need to preserve the current state in one of our 11662 * float_status values. 
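* (We keep them in fp_status and clear fp_status_f16 and
* standard_fp_status below.)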
11663 */ 11664 i = vfp_exceptbits_to_host(val); 11665 set_float_exception_flags(i, &env->vfp.fp_status); 11666 set_float_exception_flags(0, &env->vfp.fp_status_f16); 11667 set_float_exception_flags(0, &env->vfp.standard_fp_status); 11668 } 11669 11670 void vfp_set_fpscr(CPUARMState *env, uint32_t val) 11671 { 11672 HELPER(vfp_set_fpscr)(env, val); 11673 } 11674 11675 #define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p)) 11676 11677 #define VFP_BINOP(name) \ 11678 float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \ 11679 { \ 11680 float_status *fpst = fpstp; \ 11681 return float32_ ## name(a, b, fpst); \ 11682 } \ 11683 float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \ 11684 { \ 11685 float_status *fpst = fpstp; \ 11686 return float64_ ## name(a, b, fpst); \ 11687 } 11688 VFP_BINOP(add) 11689 VFP_BINOP(sub) 11690 VFP_BINOP(mul) 11691 VFP_BINOP(div) 11692 VFP_BINOP(min) 11693 VFP_BINOP(max) 11694 VFP_BINOP(minnum) 11695 VFP_BINOP(maxnum) 11696 #undef VFP_BINOP 11697 11698 float32 VFP_HELPER(neg, s)(float32 a) 11699 { 11700 return float32_chs(a); 11701 } 11702 11703 float64 VFP_HELPER(neg, d)(float64 a) 11704 { 11705 return float64_chs(a); 11706 } 11707 11708 float32 VFP_HELPER(abs, s)(float32 a) 11709 { 11710 return float32_abs(a); 11711 } 11712 11713 float64 VFP_HELPER(abs, d)(float64 a) 11714 { 11715 return float64_abs(a); 11716 } 11717 11718 float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env) 11719 { 11720 return float32_sqrt(a, &env->vfp.fp_status); 11721 } 11722 11723 float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env) 11724 { 11725 return float64_sqrt(a, &env->vfp.fp_status); 11726 } 11727 11728 /* XXX: check quiet/signaling case */ 11729 #define DO_VFP_cmp(p, type) \ 11730 void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env) \ 11731 { \ 11732 uint32_t flags; \ 11733 switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \ 11734 case 0: flags = 0x6; break; \ 11735 case -1: flags = 0x8; break; \ 11736 case 1: flags = 0x2; break; \ 11737 default: case 2: flags = 0x3; break; \ 11738 } \ 11739 env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \ 11740 | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \ 11741 } \ 11742 void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \ 11743 { \ 11744 uint32_t flags; \ 11745 switch(type ## _compare(a, b, &env->vfp.fp_status)) { \ 11746 case 0: flags = 0x6; break; \ 11747 case -1: flags = 0x8; break; \ 11748 case 1: flags = 0x2; break; \ 11749 default: case 2: flags = 0x3; break; \ 11750 } \ 11751 env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \ 11752 | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \ 11753 } 11754 DO_VFP_cmp(s, float32) 11755 DO_VFP_cmp(d, float64) 11756 #undef DO_VFP_cmp 11757 11758 /* Integer to float and float to integer conversions */ 11759 11760 #define CONV_ITOF(name, ftype, fsz, sign) \ 11761 ftype HELPER(name)(uint32_t x, void *fpstp) \ 11762 { \ 11763 float_status *fpst = fpstp; \ 11764 return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \ 11765 } 11766 11767 #define CONV_FTOI(name, ftype, fsz, sign, round) \ 11768 sign##int32_t HELPER(name)(ftype x, void *fpstp) \ 11769 { \ 11770 float_status *fpst = fpstp; \ 11771 if (float##fsz##_is_any_nan(x)) { \ 11772 float_raise(float_flag_invalid, fpst); \ 11773 return 0; \ 11774 } \ 11775 return float##fsz##_to_##sign##int32##round(x, fpst); \ 11776 } 11777 11778 #define FLOAT_CONVS(name, p, ftype, fsz, sign) \ 11779 CONV_ITOF(vfp_##name##to##p, ftype, fsz, sign) \ 11780 CONV_FTOI(vfp_to##name##p, ftype, fsz, sign, ) \ 11781 
CONV_FTOI(vfp_to##name##z##p, ftype, fsz, sign, _round_to_zero) 11782 11783 FLOAT_CONVS(si, h, uint32_t, 16, ) 11784 FLOAT_CONVS(si, s, float32, 32, ) 11785 FLOAT_CONVS(si, d, float64, 64, ) 11786 FLOAT_CONVS(ui, h, uint32_t, 16, u) 11787 FLOAT_CONVS(ui, s, float32, 32, u) 11788 FLOAT_CONVS(ui, d, float64, 64, u) 11789 11790 #undef CONV_ITOF 11791 #undef CONV_FTOI 11792 #undef FLOAT_CONVS 11793 11794 /* floating point conversion */ 11795 float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env) 11796 { 11797 return float32_to_float64(x, &env->vfp.fp_status); 11798 } 11799 11800 float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env) 11801 { 11802 return float64_to_float32(x, &env->vfp.fp_status); 11803 } 11804 11805 /* VFP3 fixed point conversion. */ 11806 #define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ 11807 float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \ 11808 void *fpstp) \ 11809 { return itype##_to_##float##fsz##_scalbn(x, -shift, fpstp); } 11810 11811 #define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, ROUND, suff) \ 11812 uint##isz##_t HELPER(vfp_to##name##p##suff)(float##fsz x, uint32_t shift, \ 11813 void *fpst) \ 11814 { \ 11815 if (unlikely(float##fsz##_is_any_nan(x))) { \ 11816 float_raise(float_flag_invalid, fpst); \ 11817 return 0; \ 11818 } \ 11819 return float##fsz##_to_##itype##_scalbn(x, ROUND, shift, fpst); \ 11820 } 11821 11822 #define VFP_CONV_FIX(name, p, fsz, isz, itype) \ 11823 VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ 11824 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, \ 11825 float_round_to_zero, _round_to_zero) \ 11826 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, \ 11827 get_float_rounding_mode(fpst), ) 11828 11829 #define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \ 11830 VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ 11831 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, \ 11832 get_float_rounding_mode(fpst), ) 11833 11834 VFP_CONV_FIX(sh, d, 64, 64, int16) 11835 VFP_CONV_FIX(sl, d, 64, 64, int32) 11836 VFP_CONV_FIX_A64(sq, d, 64, 64, int64) 11837 VFP_CONV_FIX(uh, d, 64, 64, uint16) 11838 VFP_CONV_FIX(ul, d, 64, 64, uint32) 11839 VFP_CONV_FIX_A64(uq, d, 64, 64, uint64) 11840 VFP_CONV_FIX(sh, s, 32, 32, int16) 11841 VFP_CONV_FIX(sl, s, 32, 32, int32) 11842 VFP_CONV_FIX_A64(sq, s, 32, 64, int64) 11843 VFP_CONV_FIX(uh, s, 32, 32, uint16) 11844 VFP_CONV_FIX(ul, s, 32, 32, uint32) 11845 VFP_CONV_FIX_A64(uq, s, 32, 64, uint64) 11846 11847 #undef VFP_CONV_FIX 11848 #undef VFP_CONV_FIX_FLOAT 11849 #undef VFP_CONV_FLOAT_FIX_ROUND 11850 #undef VFP_CONV_FIX_A64 11851 11852 uint32_t HELPER(vfp_sltoh)(uint32_t x, uint32_t shift, void *fpst) 11853 { 11854 return int32_to_float16_scalbn(x, -shift, fpst); 11855 } 11856 11857 uint32_t HELPER(vfp_ultoh)(uint32_t x, uint32_t shift, void *fpst) 11858 { 11859 return uint32_to_float16_scalbn(x, -shift, fpst); 11860 } 11861 11862 uint32_t HELPER(vfp_sqtoh)(uint64_t x, uint32_t shift, void *fpst) 11863 { 11864 return int64_to_float16_scalbn(x, -shift, fpst); 11865 } 11866 11867 uint32_t HELPER(vfp_uqtoh)(uint64_t x, uint32_t shift, void *fpst) 11868 { 11869 return uint64_to_float16_scalbn(x, -shift, fpst); 11870 } 11871 11872 uint32_t HELPER(vfp_toshh)(uint32_t x, uint32_t shift, void *fpst) 11873 { 11874 if (unlikely(float16_is_any_nan(x))) { 11875 float_raise(float_flag_invalid, fpst); 11876 return 0; 11877 } 11878 return float16_to_int16_scalbn(x, get_float_rounding_mode(fpst), 11879 shift, fpst); 11880 } 11881 11882 uint32_t HELPER(vfp_touhh)(uint32_t x, uint32_t shift, void 
*fpst) 11883 { 11884 if (unlikely(float16_is_any_nan(x))) { 11885 float_raise(float_flag_invalid, fpst); 11886 return 0; 11887 } 11888 return float16_to_uint16_scalbn(x, get_float_rounding_mode(fpst), 11889 shift, fpst); 11890 } 11891 11892 uint32_t HELPER(vfp_toslh)(uint32_t x, uint32_t shift, void *fpst) 11893 { 11894 if (unlikely(float16_is_any_nan(x))) { 11895 float_raise(float_flag_invalid, fpst); 11896 return 0; 11897 } 11898 return float16_to_int32_scalbn(x, get_float_rounding_mode(fpst), 11899 shift, fpst); 11900 } 11901 11902 uint32_t HELPER(vfp_toulh)(uint32_t x, uint32_t shift, void *fpst) 11903 { 11904 if (unlikely(float16_is_any_nan(x))) { 11905 float_raise(float_flag_invalid, fpst); 11906 return 0; 11907 } 11908 return float16_to_uint32_scalbn(x, get_float_rounding_mode(fpst), 11909 shift, fpst); 11910 } 11911 11912 uint64_t HELPER(vfp_tosqh)(uint32_t x, uint32_t shift, void *fpst) 11913 { 11914 if (unlikely(float16_is_any_nan(x))) { 11915 float_raise(float_flag_invalid, fpst); 11916 return 0; 11917 } 11918 return float16_to_int64_scalbn(x, get_float_rounding_mode(fpst), 11919 shift, fpst); 11920 } 11921 11922 uint64_t HELPER(vfp_touqh)(uint32_t x, uint32_t shift, void *fpst) 11923 { 11924 if (unlikely(float16_is_any_nan(x))) { 11925 float_raise(float_flag_invalid, fpst); 11926 return 0; 11927 } 11928 return float16_to_uint64_scalbn(x, get_float_rounding_mode(fpst), 11929 shift, fpst); 11930 } 11931 11932 /* Set the current fp rounding mode and return the old one. 11933 * The argument is a softfloat float_round_ value. 11934 */ 11935 uint32_t HELPER(set_rmode)(uint32_t rmode, void *fpstp) 11936 { 11937 float_status *fp_status = fpstp; 11938 11939 uint32_t prev_rmode = get_float_rounding_mode(fp_status); 11940 set_float_rounding_mode(rmode, fp_status); 11941 11942 return prev_rmode; 11943 } 11944 11945 /* Set the current fp rounding mode in the standard fp status and return 11946 * the old one. This is for NEON instructions that need to change the 11947 * rounding mode but wish to use the standard FPSCR values for everything 11948 * else. Always set the rounding mode back to the correct value after 11949 * modifying it. 11950 * The argument is a softfloat float_round_ value. 11951 */ 11952 uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env) 11953 { 11954 float_status *fp_status = &env->vfp.standard_fp_status; 11955 11956 uint32_t prev_rmode = get_float_rounding_mode(fp_status); 11957 set_float_rounding_mode(rmode, fp_status); 11958 11959 return prev_rmode; 11960 } 11961 11962 /* Half precision conversions. */ 11963 float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, void *fpstp, uint32_t ahp_mode) 11964 { 11965 /* Squash FZ16 to 0 for the duration of conversion. In this case, 11966 * it would affect flushing input denormals. 11967 */ 11968 float_status *fpst = fpstp; 11969 flag save = get_flush_inputs_to_zero(fpst); 11970 set_flush_inputs_to_zero(false, fpst); 11971 float32 r = float16_to_float32(a, !ahp_mode, fpst); 11972 set_flush_inputs_to_zero(save, fpst); 11973 return r; 11974 } 11975 11976 uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode) 11977 { 11978 /* Squash FZ16 to 0 for the duration of conversion. In this case, 11979 * it would affect flushing output denormals. 
11980 */ 11981 float_status *fpst = fpstp; 11982 flag save = get_flush_to_zero(fpst); 11983 set_flush_to_zero(false, fpst); 11984 float16 r = float32_to_float16(a, !ahp_mode, fpst); 11985 set_flush_to_zero(save, fpst); 11986 return r; 11987 } 11988 11989 float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, void *fpstp, uint32_t ahp_mode) 11990 { 11991 /* Squash FZ16 to 0 for the duration of conversion. In this case, 11992 * it would affect flushing input denormals. 11993 */ 11994 float_status *fpst = fpstp; 11995 flag save = get_flush_inputs_to_zero(fpst); 11996 set_flush_inputs_to_zero(false, fpst); 11997 float64 r = float16_to_float64(a, !ahp_mode, fpst); 11998 set_flush_inputs_to_zero(save, fpst); 11999 return r; 12000 } 12001 12002 uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode) 12003 { 12004 /* Squash FZ16 to 0 for the duration of conversion. In this case, 12005 * it would affect flushing output denormals. 12006 */ 12007 float_status *fpst = fpstp; 12008 flag save = get_flush_to_zero(fpst); 12009 set_flush_to_zero(false, fpst); 12010 float16 r = float64_to_float16(a, !ahp_mode, fpst); 12011 set_flush_to_zero(save, fpst); 12012 return r; 12013 } 12014 12015 #define float32_two make_float32(0x40000000) 12016 #define float32_three make_float32(0x40400000) 12017 #define float32_one_point_five make_float32(0x3fc00000) 12018 12019 float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env) 12020 { 12021 float_status *s = &env->vfp.standard_fp_status; 12022 if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) || 12023 (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) { 12024 if (!(float32_is_zero(a) || float32_is_zero(b))) { 12025 float_raise(float_flag_input_denormal, s); 12026 } 12027 return float32_two; 12028 } 12029 return float32_sub(float32_two, float32_mul(a, b, s), s); 12030 } 12031 12032 float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env) 12033 { 12034 float_status *s = &env->vfp.standard_fp_status; 12035 float32 product; 12036 if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) || 12037 (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) { 12038 if (!(float32_is_zero(a) || float32_is_zero(b))) { 12039 float_raise(float_flag_input_denormal, s); 12040 } 12041 return float32_one_point_five; 12042 } 12043 product = float32_mul(a, b, s); 12044 return float32_div(float32_sub(float32_three, product, s), float32_two, s); 12045 } 12046 12047 /* NEON helpers. */ 12048 12049 /* Constants 256 and 512 are used in some helpers; we avoid relying on 12050 * int->float conversions at run-time. */ 12051 #define float64_256 make_float64(0x4070000000000000LL) 12052 #define float64_512 make_float64(0x4080000000000000LL) 12053 #define float16_maxnorm make_float16(0x7bff) 12054 #define float32_maxnorm make_float32(0x7f7fffff) 12055 #define float64_maxnorm make_float64(0x7fefffffffffffffLL) 12056 12057 /* Reciprocal functions 12058 * 12059 * The algorithm that must be used to calculate the estimate 12060 * is specified by the ARM ARM, see FPRecipEstimate()/RecipEstimate 12061 */ 12062 12063 /* See RecipEstimate() 12064 * 12065 * input is a 9 bit fixed point number 12066 * input range 256 .. 511 for a number from 0.5 <= x < 1.0. 12067 * result range 256 .. 511 for a number from 1.0 to 511/256. 
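 *
 * For illustration, working the arithmetic below by hand:
 *   input 256: a = 513, b = (1 << 19) / 513 = 1022, r = 511
 *   input 511: a = 1023, b = (1 << 19) / 1023 = 512, r = 256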
12068 */ 12069 12070 static int recip_estimate(int input) 12071 { 12072 int a, b, r; 12073 assert(256 <= input && input < 512); 12074 a = (input * 2) + 1; 12075 b = (1 << 19) / a; 12076 r = (b + 1) >> 1; 12077 assert(256 <= r && r < 512); 12078 return r; 12079 } 12080 12081 /* 12082 * Common wrapper to call recip_estimate 12083 * 12084 * The parameters are exponent and 64 bit fraction (without implicit 12085 * bit) where the binary point is nominally at bit 52. Returns a 12086 * float64 which can then be rounded to the appropriate size by the 12087 * callee. 12088 */ 12089 12090 static uint64_t call_recip_estimate(int *exp, int exp_off, uint64_t frac) 12091 { 12092 uint32_t scaled, estimate; 12093 uint64_t result_frac; 12094 int result_exp; 12095 12096 /* Handle sub-normals */ 12097 if (*exp == 0) { 12098 if (extract64(frac, 51, 1) == 0) { 12099 *exp = -1; 12100 frac <<= 2; 12101 } else { 12102 frac <<= 1; 12103 } 12104 } 12105 12106 /* scaled = UInt('1':fraction<51:44>) */ 12107 scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8)); 12108 estimate = recip_estimate(scaled); 12109 12110 result_exp = exp_off - *exp; 12111 result_frac = deposit64(0, 44, 8, estimate); 12112 if (result_exp == 0) { 12113 result_frac = deposit64(result_frac >> 1, 51, 1, 1); 12114 } else if (result_exp == -1) { 12115 result_frac = deposit64(result_frac >> 2, 50, 2, 1); 12116 result_exp = 0; 12117 } 12118 12119 *exp = result_exp; 12120 12121 return result_frac; 12122 } 12123 12124 static bool round_to_inf(float_status *fpst, bool sign_bit) 12125 { 12126 switch (fpst->float_rounding_mode) { 12127 case float_round_nearest_even: /* Round to Nearest */ 12128 return true; 12129 case float_round_up: /* Round to +Inf */ 12130 return !sign_bit; 12131 case float_round_down: /* Round to -Inf */ 12132 return sign_bit; 12133 case float_round_to_zero: /* Round to Zero */ 12134 return false; 12135 } 12136 12137 g_assert_not_reached(); 12138 } 12139 12140 uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp) 12141 { 12142 float_status *fpst = fpstp; 12143 float16 f16 = float16_squash_input_denormal(input, fpst); 12144 uint32_t f16_val = float16_val(f16); 12145 uint32_t f16_sign = float16_is_neg(f16); 12146 int f16_exp = extract32(f16_val, 10, 5); 12147 uint32_t f16_frac = extract32(f16_val, 0, 10); 12148 uint64_t f64_frac; 12149 12150 if (float16_is_any_nan(f16)) { 12151 float16 nan = f16; 12152 if (float16_is_signaling_nan(f16, fpst)) { 12153 float_raise(float_flag_invalid, fpst); 12154 nan = float16_silence_nan(f16, fpst); 12155 } 12156 if (fpst->default_nan_mode) { 12157 nan = float16_default_nan(fpst); 12158 } 12159 return nan; 12160 } else if (float16_is_infinity(f16)) { 12161 return float16_set_sign(float16_zero, float16_is_neg(f16)); 12162 } else if (float16_is_zero(f16)) { 12163 float_raise(float_flag_divbyzero, fpst); 12164 return float16_set_sign(float16_infinity, float16_is_neg(f16)); 12165 } else if (float16_abs(f16) < (1 << 8)) { 12166 /* Abs(value) < 2.0^-16 */ 12167 float_raise(float_flag_overflow | float_flag_inexact, fpst); 12168 if (round_to_inf(fpst, f16_sign)) { 12169 return float16_set_sign(float16_infinity, f16_sign); 12170 } else { 12171 return float16_set_sign(float16_maxnorm, f16_sign); 12172 } 12173 } else if (f16_exp >= 29 && fpst->flush_to_zero) { 12174 float_raise(float_flag_underflow, fpst); 12175 return float16_set_sign(float16_zero, float16_is_neg(f16)); 12176 } 12177 12178 f64_frac = call_recip_estimate(&f16_exp, 29, 12179 ((uint64_t) f16_frac) << (52 - 10)); 12180 12181 /* result = sign : 
result_exp<4:0> : fraction<51:42> */ 12182 f16_val = deposit32(0, 15, 1, f16_sign); 12183 f16_val = deposit32(f16_val, 10, 5, f16_exp); 12184 f16_val = deposit32(f16_val, 0, 10, extract64(f64_frac, 52 - 10, 10)); 12185 return make_float16(f16_val); 12186 } 12187 12188 float32 HELPER(recpe_f32)(float32 input, void *fpstp) 12189 { 12190 float_status *fpst = fpstp; 12191 float32 f32 = float32_squash_input_denormal(input, fpst); 12192 uint32_t f32_val = float32_val(f32); 12193 bool f32_sign = float32_is_neg(f32); 12194 int f32_exp = extract32(f32_val, 23, 8); 12195 uint32_t f32_frac = extract32(f32_val, 0, 23); 12196 uint64_t f64_frac; 12197 12198 if (float32_is_any_nan(f32)) { 12199 float32 nan = f32; 12200 if (float32_is_signaling_nan(f32, fpst)) { 12201 float_raise(float_flag_invalid, fpst); 12202 nan = float32_silence_nan(f32, fpst); 12203 } 12204 if (fpst->default_nan_mode) { 12205 nan = float32_default_nan(fpst); 12206 } 12207 return nan; 12208 } else if (float32_is_infinity(f32)) { 12209 return float32_set_sign(float32_zero, float32_is_neg(f32)); 12210 } else if (float32_is_zero(f32)) { 12211 float_raise(float_flag_divbyzero, fpst); 12212 return float32_set_sign(float32_infinity, float32_is_neg(f32)); 12213 } else if (float32_abs(f32) < (1ULL << 21)) { 12214 /* Abs(value) < 2.0^-128 */ 12215 float_raise(float_flag_overflow | float_flag_inexact, fpst); 12216 if (round_to_inf(fpst, f32_sign)) { 12217 return float32_set_sign(float32_infinity, f32_sign); 12218 } else { 12219 return float32_set_sign(float32_maxnorm, f32_sign); 12220 } 12221 } else if (f32_exp >= 253 && fpst->flush_to_zero) { 12222 float_raise(float_flag_underflow, fpst); 12223 return float32_set_sign(float32_zero, float32_is_neg(f32)); 12224 } 12225 12226 f64_frac = call_recip_estimate(&f32_exp, 253, 12227 ((uint64_t) f32_frac) << (52 - 23)); 12228 12229 /* result = sign : result_exp<7:0> : fraction<51:29> */ 12230 f32_val = deposit32(0, 31, 1, f32_sign); 12231 f32_val = deposit32(f32_val, 23, 8, f32_exp); 12232 f32_val = deposit32(f32_val, 0, 23, extract64(f64_frac, 52 - 23, 23)); 12233 return make_float32(f32_val); 12234 } 12235 12236 float64 HELPER(recpe_f64)(float64 input, void *fpstp) 12237 { 12238 float_status *fpst = fpstp; 12239 float64 f64 = float64_squash_input_denormal(input, fpst); 12240 uint64_t f64_val = float64_val(f64); 12241 bool f64_sign = float64_is_neg(f64); 12242 int f64_exp = extract64(f64_val, 52, 11); 12243 uint64_t f64_frac = extract64(f64_val, 0, 52); 12244 12245 /* Deal with any special cases */ 12246 if (float64_is_any_nan(f64)) { 12247 float64 nan = f64; 12248 if (float64_is_signaling_nan(f64, fpst)) { 12249 float_raise(float_flag_invalid, fpst); 12250 nan = float64_silence_nan(f64, fpst); 12251 } 12252 if (fpst->default_nan_mode) { 12253 nan = float64_default_nan(fpst); 12254 } 12255 return nan; 12256 } else if (float64_is_infinity(f64)) { 12257 return float64_set_sign(float64_zero, float64_is_neg(f64)); 12258 } else if (float64_is_zero(f64)) { 12259 float_raise(float_flag_divbyzero, fpst); 12260 return float64_set_sign(float64_infinity, float64_is_neg(f64)); 12261 } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) { 12262 /* Abs(value) < 2.0^-1024 */ 12263 float_raise(float_flag_overflow | float_flag_inexact, fpst); 12264 if (round_to_inf(fpst, f64_sign)) { 12265 return float64_set_sign(float64_infinity, f64_sign); 12266 } else { 12267 return float64_set_sign(float64_maxnorm, f64_sign); 12268 } 12269 } else if (f64_exp >= 2045 && fpst->flush_to_zero) { 12270 float_raise(float_flag_underflow, 
fpst); 12271 return float64_set_sign(float64_zero, float64_is_neg(f64)); 12272 } 12273 12274 f64_frac = call_recip_estimate(&f64_exp, 2045, f64_frac); 12275 12276 /* result = sign : result_exp<10:0> : fraction<51:0>; */ 12277 f64_val = deposit64(0, 63, 1, f64_sign); 12278 f64_val = deposit64(f64_val, 52, 11, f64_exp); 12279 f64_val = deposit64(f64_val, 0, 52, f64_frac); 12280 return make_float64(f64_val); 12281 } 12282 12283 /* The algorithm that must be used to calculate the estimate 12284 * is specified by the ARM ARM. 12285 */ 12286 12287 static int do_recip_sqrt_estimate(int a) 12288 { 12289 int b, estimate; 12290 12291 assert(128 <= a && a < 512); 12292 if (a < 256) { 12293 a = a * 2 + 1; 12294 } else { 12295 a = (a >> 1) << 1; 12296 a = (a + 1) * 2; 12297 } 12298 b = 512; 12299 while (a * (b + 1) * (b + 1) < (1 << 28)) { 12300 b += 1; 12301 } 12302 estimate = (b + 1) / 2; 12303 assert(256 <= estimate && estimate < 512); 12304 12305 return estimate; 12306 } 12307 12308 12309 static uint64_t recip_sqrt_estimate(int *exp , int exp_off, uint64_t frac) 12310 { 12311 int estimate; 12312 uint32_t scaled; 12313 12314 if (*exp == 0) { 12315 while (extract64(frac, 51, 1) == 0) { 12316 frac = frac << 1; 12317 *exp -= 1; 12318 } 12319 frac = extract64(frac, 0, 51) << 1; 12320 } 12321 12322 if (*exp & 1) { 12323 /* scaled = UInt('01':fraction<51:45>) */ 12324 scaled = deposit32(1 << 7, 0, 7, extract64(frac, 45, 7)); 12325 } else { 12326 /* scaled = UInt('1':fraction<51:44>) */ 12327 scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8)); 12328 } 12329 estimate = do_recip_sqrt_estimate(scaled); 12330 12331 *exp = (exp_off - *exp) / 2; 12332 return extract64(estimate, 0, 8) << 44; 12333 } 12334 12335 uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp) 12336 { 12337 float_status *s = fpstp; 12338 float16 f16 = float16_squash_input_denormal(input, s); 12339 uint16_t val = float16_val(f16); 12340 bool f16_sign = float16_is_neg(f16); 12341 int f16_exp = extract32(val, 10, 5); 12342 uint16_t f16_frac = extract32(val, 0, 10); 12343 uint64_t f64_frac; 12344 12345 if (float16_is_any_nan(f16)) { 12346 float16 nan = f16; 12347 if (float16_is_signaling_nan(f16, s)) { 12348 float_raise(float_flag_invalid, s); 12349 nan = float16_silence_nan(f16, s); 12350 } 12351 if (s->default_nan_mode) { 12352 nan = float16_default_nan(s); 12353 } 12354 return nan; 12355 } else if (float16_is_zero(f16)) { 12356 float_raise(float_flag_divbyzero, s); 12357 return float16_set_sign(float16_infinity, f16_sign); 12358 } else if (f16_sign) { 12359 float_raise(float_flag_invalid, s); 12360 return float16_default_nan(s); 12361 } else if (float16_is_infinity(f16)) { 12362 return float16_zero; 12363 } 12364 12365 /* Scale and normalize to a double-precision value between 0.25 and 1.0, 12366 * preserving the parity of the exponent. 
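     * (recip_sqrt_estimate() keys off that parity: for an odd adjusted
     * exponent it forms the 9-bit probe from '01':fraction<51:45>, for an
     * even one from '1':fraction<51:44>, so do_recip_sqrt_estimate() always
     * sees a value in 128..511.)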
     */

    f64_frac = ((uint64_t) f16_frac) << (52 - 10);

    f64_frac = recip_sqrt_estimate(&f16_exp, 44, f64_frac);

    /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(2) */
    val = deposit32(0, 15, 1, f16_sign);
    val = deposit32(val, 10, 5, f16_exp);
    val = deposit32(val, 2, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float16(val);
}

float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
{
    float_status *s = fpstp;
    float32 f32 = float32_squash_input_denormal(input, s);
    uint32_t val = float32_val(f32);
    uint32_t f32_sign = float32_is_neg(f32);
    int f32_exp = extract32(val, 23, 8);
    uint32_t f32_frac = extract32(val, 0, 23);
    uint64_t f64_frac;

    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, s)) {
            float_raise(float_flag_invalid, s);
            nan = float32_silence_nan(f32, s);
        }
        if (s->default_nan_mode) {
            nan = float32_default_nan(s);
        }
        return nan;
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_is_neg(f32)) {
        float_raise(float_flag_invalid, s);
        return float32_default_nan(s);
    } else if (float32_is_infinity(f32)) {
        return float32_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent. */

    f64_frac = ((uint64_t) f32_frac) << 29;

    f64_frac = recip_sqrt_estimate(&f32_exp, 380, f64_frac);

    /* result = sign : result_exp<7:0> : estimate<7:0> : Zeros(15) */
    val = deposit32(0, 31, 1, f32_sign);
    val = deposit32(val, 23, 8, f32_exp);
    val = deposit32(val, 15, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float32(val);
}

float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
{
    float_status *s = fpstp;
    float64 f64 = float64_squash_input_denormal(input, s);
    uint64_t val = float64_val(f64);
    bool f64_sign = float64_is_neg(f64);
    int f64_exp = extract64(val, 52, 11);
    uint64_t f64_frac = extract64(val, 0, 52);

    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64, s)) {
            float_raise(float_flag_invalid, s);
            nan = float64_silence_nan(f64, s);
        }
        if (s->default_nan_mode) {
            nan = float64_default_nan(s);
        }
        return nan;
    } else if (float64_is_zero(f64)) {
        float_raise(float_flag_divbyzero, s);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if (float64_is_neg(f64)) {
        float_raise(float_flag_invalid, s);
        return float64_default_nan(s);
    } else if (float64_is_infinity(f64)) {
        return float64_zero;
    }

    f64_frac = recip_sqrt_estimate(&f64_exp, 3068, f64_frac);

    /* result = sign : result_exp<10:0> : estimate<7:0> : Zeros(44) */
    val = deposit64(0, 63, 1, f64_sign);
    val = deposit64(val, 52, 11, f64_exp);
    val = deposit64(val, 44, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float64(val);
}

uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp)
{
    /* float_status *s = fpstp; */
    int input, estimate;

    if ((a & 0x80000000) == 0) {
        return 0xffffffff;
    }

    input = extract32(a, 23, 9);
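    /*
     * The operand is treated as an unsigned fixed-point number in
     * [0.5, 1.0): with bit 31 known to be set, bits <31:23> give a
     * 9-bit value in 256..511, which recip_estimate() maps to an
     * estimate in 511..256 that is returned in bits <31:23> of the
     * result. For illustration, tracing this code by hand:
     * a = 0x80000000 gives input = 256, estimate = 511, and a result
     * of 0x1ff << 23 = 0xff800000.
     */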
estimate = recip_estimate(input); 12472 12473 return deposit32(0, (32 - 9), 9, estimate); 12474 } 12475 12476 uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp) 12477 { 12478 int estimate; 12479 12480 if ((a & 0xc0000000) == 0) { 12481 return 0xffffffff; 12482 } 12483 12484 estimate = do_recip_sqrt_estimate(extract32(a, 23, 9)); 12485 12486 return deposit32(0, 23, 9, estimate); 12487 } 12488 12489 /* VFPv4 fused multiply-accumulate */ 12490 float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp) 12491 { 12492 float_status *fpst = fpstp; 12493 return float32_muladd(a, b, c, 0, fpst); 12494 } 12495 12496 float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp) 12497 { 12498 float_status *fpst = fpstp; 12499 return float64_muladd(a, b, c, 0, fpst); 12500 } 12501 12502 /* ARMv8 round to integral */ 12503 float32 HELPER(rints_exact)(float32 x, void *fp_status) 12504 { 12505 return float32_round_to_int(x, fp_status); 12506 } 12507 12508 float64 HELPER(rintd_exact)(float64 x, void *fp_status) 12509 { 12510 return float64_round_to_int(x, fp_status); 12511 } 12512 12513 float32 HELPER(rints)(float32 x, void *fp_status) 12514 { 12515 int old_flags = get_float_exception_flags(fp_status), new_flags; 12516 float32 ret; 12517 12518 ret = float32_round_to_int(x, fp_status); 12519 12520 /* Suppress any inexact exceptions the conversion produced */ 12521 if (!(old_flags & float_flag_inexact)) { 12522 new_flags = get_float_exception_flags(fp_status); 12523 set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status); 12524 } 12525 12526 return ret; 12527 } 12528 12529 float64 HELPER(rintd)(float64 x, void *fp_status) 12530 { 12531 int old_flags = get_float_exception_flags(fp_status), new_flags; 12532 float64 ret; 12533 12534 ret = float64_round_to_int(x, fp_status); 12535 12536 new_flags = get_float_exception_flags(fp_status); 12537 12538 /* Suppress any inexact exceptions the conversion produced */ 12539 if (!(old_flags & float_flag_inexact)) { 12540 new_flags = get_float_exception_flags(fp_status); 12541 set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status); 12542 } 12543 12544 return ret; 12545 } 12546 12547 /* Convert ARM rounding mode to softfloat */ 12548 int arm_rmode_to_sf(int rmode) 12549 { 12550 switch (rmode) { 12551 case FPROUNDING_TIEAWAY: 12552 rmode = float_round_ties_away; 12553 break; 12554 case FPROUNDING_ODD: 12555 /* FIXME: add support for TIEAWAY and ODD */ 12556 qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n", 12557 rmode); 12558 /* fall through for now */ 12559 case FPROUNDING_TIEEVEN: 12560 default: 12561 rmode = float_round_nearest_even; 12562 break; 12563 case FPROUNDING_POSINF: 12564 rmode = float_round_up; 12565 break; 12566 case FPROUNDING_NEGINF: 12567 rmode = float_round_down; 12568 break; 12569 case FPROUNDING_ZERO: 12570 rmode = float_round_to_zero; 12571 break; 12572 } 12573 return rmode; 12574 } 12575 12576 /* CRC helpers. 12577 * The upper bytes of val (above the number specified by 'bytes') must have 12578 * been zeroed out by the caller. 12579 */ 12580 uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes) 12581 { 12582 uint8_t buf[4]; 12583 12584 stl_le_p(buf, val); 12585 12586 /* zlib crc32 converts the accumulator and output to one's complement. 
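     * The ARM CRC32 instructions want the raw polynomial update with no
     * initial or final inversion, so this helper inverts acc on the way in
     * and the result on the way out to cancel zlib's pre/post inversion.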
*/ 12587 return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff; 12588 } 12589 12590 uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes) 12591 { 12592 uint8_t buf[4]; 12593 12594 stl_le_p(buf, val); 12595 12596 /* Linux crc32c converts the output to one's complement. */ 12597 return crc32c(acc, buf, bytes) ^ 0xffffffff; 12598 } 12599 12600 /* Return the exception level to which FP-disabled exceptions should 12601 * be taken, or 0 if FP is enabled. 12602 */ 12603 int fp_exception_el(CPUARMState *env, int cur_el) 12604 { 12605 #ifndef CONFIG_USER_ONLY 12606 int fpen; 12607 12608 /* CPACR and the CPTR registers don't exist before v6, so FP is 12609 * always accessible 12610 */ 12611 if (!arm_feature(env, ARM_FEATURE_V6)) { 12612 return 0; 12613 } 12614 12615 /* The CPACR controls traps to EL1, or PL1 if we're 32 bit: 12616 * 0, 2 : trap EL0 and EL1/PL1 accesses 12617 * 1 : trap only EL0 accesses 12618 * 3 : trap no accesses 12619 */ 12620 fpen = extract32(env->cp15.cpacr_el1, 20, 2); 12621 switch (fpen) { 12622 case 0: 12623 case 2: 12624 if (cur_el == 0 || cur_el == 1) { 12625 /* Trap to PL1, which might be EL1 or EL3 */ 12626 if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { 12627 return 3; 12628 } 12629 return 1; 12630 } 12631 if (cur_el == 3 && !is_a64(env)) { 12632 /* Secure PL1 running at EL3 */ 12633 return 3; 12634 } 12635 break; 12636 case 1: 12637 if (cur_el == 0) { 12638 return 1; 12639 } 12640 break; 12641 case 3: 12642 break; 12643 } 12644 12645 /* For the CPTR registers we don't need to guard with an ARM_FEATURE 12646 * check because zero bits in the registers mean "don't trap". 12647 */ 12648 12649 /* CPTR_EL2 : present in v7VE or v8 */ 12650 if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1) 12651 && !arm_is_secure_below_el3(env)) { 12652 /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */ 12653 return 2; 12654 } 12655 12656 /* CPTR_EL3 : present in v8 */ 12657 if (extract32(env->cp15.cptr_el[3], 10, 1)) { 12658 /* Trap all FP ops to EL3 */ 12659 return 3; 12660 } 12661 #endif 12662 return 0; 12663 } 12664 12665 void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, 12666 target_ulong *cs_base, uint32_t *pflags) 12667 { 12668 ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false)); 12669 int current_el = arm_current_el(env); 12670 int fp_el = fp_exception_el(env, current_el); 12671 uint32_t flags; 12672 12673 if (is_a64(env)) { 12674 *pc = env->pc; 12675 flags = ARM_TBFLAG_AARCH64_STATE_MASK; 12676 /* Get control bits for tagged addresses */ 12677 flags |= (arm_regime_tbi0(env, mmu_idx) << ARM_TBFLAG_TBI0_SHIFT); 12678 flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT); 12679 12680 if (arm_feature(env, ARM_FEATURE_SVE)) { 12681 int sve_el = sve_exception_el(env, current_el); 12682 uint32_t zcr_len; 12683 12684 /* If SVE is disabled, but FP is enabled, 12685 * then the effective len is 0. 
12686 */ 12687 if (sve_el != 0 && fp_el == 0) { 12688 zcr_len = 0; 12689 } else { 12690 zcr_len = sve_zcr_len_for_el(env, current_el); 12691 } 12692 flags |= sve_el << ARM_TBFLAG_SVEEXC_EL_SHIFT; 12693 flags |= zcr_len << ARM_TBFLAG_ZCR_LEN_SHIFT; 12694 } 12695 } else { 12696 *pc = env->regs[15]; 12697 flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT) 12698 | (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT) 12699 | (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT) 12700 | (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT) 12701 | (arm_sctlr_b(env) << ARM_TBFLAG_SCTLR_B_SHIFT); 12702 if (!(access_secure_reg(env))) { 12703 flags |= ARM_TBFLAG_NS_MASK; 12704 } 12705 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30) 12706 || arm_el_is_aa64(env, 1)) { 12707 flags |= ARM_TBFLAG_VFPEN_MASK; 12708 } 12709 flags |= (extract32(env->cp15.c15_cpar, 0, 2) 12710 << ARM_TBFLAG_XSCALE_CPAR_SHIFT); 12711 } 12712 12713 flags |= (arm_to_core_mmu_idx(mmu_idx) << ARM_TBFLAG_MMUIDX_SHIFT); 12714 12715 /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine 12716 * states defined in the ARM ARM for software singlestep: 12717 * SS_ACTIVE PSTATE.SS State 12718 * 0 x Inactive (the TB flag for SS is always 0) 12719 * 1 0 Active-pending 12720 * 1 1 Active-not-pending 12721 */ 12722 if (arm_singlestep_active(env)) { 12723 flags |= ARM_TBFLAG_SS_ACTIVE_MASK; 12724 if (is_a64(env)) { 12725 if (env->pstate & PSTATE_SS) { 12726 flags |= ARM_TBFLAG_PSTATE_SS_MASK; 12727 } 12728 } else { 12729 if (env->uncached_cpsr & PSTATE_SS) { 12730 flags |= ARM_TBFLAG_PSTATE_SS_MASK; 12731 } 12732 } 12733 } 12734 if (arm_cpu_data_is_big_endian(env)) { 12735 flags |= ARM_TBFLAG_BE_DATA_MASK; 12736 } 12737 flags |= fp_el << ARM_TBFLAG_FPEXC_EL_SHIFT; 12738 12739 if (arm_v7m_is_handler_mode(env)) { 12740 flags |= ARM_TBFLAG_HANDLER_MASK; 12741 } 12742 12743 /* v8M always applies stack limit checks unless CCR.STKOFHFNMIGN is 12744 * suppressing them because the requested execution priority is less than 0. 12745 */ 12746 if (arm_feature(env, ARM_FEATURE_V8) && 12747 arm_feature(env, ARM_FEATURE_M) && 12748 !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) && 12749 (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) { 12750 flags |= ARM_TBFLAG_STACKCHECK_MASK; 12751 } 12752 12753 *pflags = flags; 12754 *cs_base = 0; 12755 } 12756 12757 #ifdef TARGET_AARCH64 12758 /* 12759 * The manual says that when SVE is enabled and VQ is widened the 12760 * implementation is allowed to zero the previously inaccessible 12761 * portion of the registers. The corollary to that is that when 12762 * SVE is enabled and VQ is narrowed we are also allowed to zero 12763 * the now inaccessible portion of the registers. 12764 * 12765 * The intent of this is that no predicate bit beyond VQ is ever set. 12766 * Which means that some operations on predicate registers themselves 12767 * may operate on full uint64_t or even unrolled across the maximum 12768 * uint64_t[4]. Performing 4 bits of host arithmetic unconditionally 12769 * may well be cheaper than conditionals to restrict the operation 12770 * to the relevant portion of a uint16_t[16]. 12771 */ 12772 void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) 12773 { 12774 int i, j; 12775 uint64_t pmask; 12776 12777 assert(vq >= 1 && vq <= ARM_MAX_VQ); 12778 assert(vq <= arm_env_get_cpu(env)->sve_max_vq); 12779 12780 /* Zap the high bits of the zregs. 
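     * Each zreg's d[] holds ARM_MAX_VQ 128-bit quanta as pairs of uint64_t,
     * so quantum vq starts at d[2 * vq] and each excess quantum is 16 bytes.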
     */
    for (i = 0; i < 32; i++) {
        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
    }

    /* Zap the high bits of the pregs and ffr. */
    pmask = 0;
    if (vq & 3) {
        pmask = ~(-1ULL << (16 * (vq & 3)));
    }
    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
        for (i = 0; i < 17; ++i) {
            env->vfp.pregs[i].p[j] &= pmask;
        }
        pmask = 0;
    }
}

/*
 * Notice a change in SVE vector size when changing EL.
 */
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64)
{
    int old_len, new_len;
    bool old_a64, new_a64;

    /* Nothing to do if no SVE. */
    if (!arm_feature(env, ARM_FEATURE_SVE)) {
        return;
    }

    /* Nothing to do if FP is disabled in either EL. */
    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
        return;
    }

    /*
     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
     * at ELx, or not available because the EL is in AArch32 state, then
     * for all purposes other than a direct read, the ZCR_ELx.LEN field
     * has an effective value of 0".
     *
     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
     * from EL2->EL1. Thus we go ahead and narrow when entering aa32 so that
     * we already have the correct register contents when encountering the
     * vq0->vq0 transition between EL0->EL1.
     */
    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
    old_len = (old_a64 && !sve_exception_el(env, old_el)
               ? sve_zcr_len_for_el(env, old_el) : 0);
    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
    new_len = (new_a64 && !sve_exception_el(env, new_el)
               ? sve_zcr_len_for_el(env, new_el) : 0);

    /* When changing vector length, clear inaccessible state. */
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}
#endif