1 #include "qemu/osdep.h" 2 #include "target/arm/idau.h" 3 #include "trace.h" 4 #include "cpu.h" 5 #include "internals.h" 6 #include "exec/gdbstub.h" 7 #include "exec/helper-proto.h" 8 #include "qemu/host-utils.h" 9 #include "sysemu/arch_init.h" 10 #include "sysemu/sysemu.h" 11 #include "qemu/bitops.h" 12 #include "qemu/crc32c.h" 13 #include "exec/exec-all.h" 14 #include "exec/cpu_ldst.h" 15 #include "arm_ldst.h" 16 #include <zlib.h> /* For crc32 */ 17 #include "exec/semihost.h" 18 #include "sysemu/kvm.h" 19 #include "fpu/softfloat.h" 20 #include "qemu/range.h" 21 22 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */ 23 24 #ifndef CONFIG_USER_ONLY 25 /* Cacheability and shareability attributes for a memory access */ 26 typedef struct ARMCacheAttrs { 27 unsigned int attrs:8; /* as in the MAIR register encoding */ 28 unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */ 29 } ARMCacheAttrs; 30 31 static bool get_phys_addr(CPUARMState *env, target_ulong address, 32 MMUAccessType access_type, ARMMMUIdx mmu_idx, 33 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 34 target_ulong *page_size, 35 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs); 36 37 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, 38 MMUAccessType access_type, ARMMMUIdx mmu_idx, 39 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, 40 target_ulong *page_size_ptr, 41 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs); 42 43 /* Security attributes for an address, as returned by v8m_security_lookup. */ 44 typedef struct V8M_SAttributes { 45 bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */ 46 bool ns; 47 bool nsc; 48 uint8_t sregion; 49 bool srvalid; 50 uint8_t iregion; 51 bool irvalid; 52 } V8M_SAttributes; 53 54 static void v8m_security_lookup(CPUARMState *env, uint32_t address, 55 MMUAccessType access_type, ARMMMUIdx mmu_idx, 56 V8M_SAttributes *sattrs); 57 #endif 58 59 static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg) 60 { 61 int nregs; 62 63 /* VFP data registers are always little-endian. */ 64 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16; 65 if (reg < nregs) { 66 stq_le_p(buf, *aa32_vfp_dreg(env, reg)); 67 return 8; 68 } 69 if (arm_feature(env, ARM_FEATURE_NEON)) { 70 /* Aliases for Q regs. */ 71 nregs += 16; 72 if (reg < nregs) { 73 uint64_t *q = aa32_vfp_qreg(env, reg - 32); 74 stq_le_p(buf, q[0]); 75 stq_le_p(buf + 8, q[1]); 76 return 16; 77 } 78 } 79 switch (reg - nregs) { 80 case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4; 81 case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4; 82 case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4; 83 } 84 return 0; 85 } 86 87 static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) 88 { 89 int nregs; 90 91 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16; 92 if (reg < nregs) { 93 *aa32_vfp_dreg(env, reg) = ldq_le_p(buf); 94 return 8; 95 } 96 if (arm_feature(env, ARM_FEATURE_NEON)) { 97 nregs += 16; 98 if (reg < nregs) { 99 uint64_t *q = aa32_vfp_qreg(env, reg - 32); 100 q[0] = ldq_le_p(buf); 101 q[1] = ldq_le_p(buf + 8); 102 return 16; 103 } 104 } 105 switch (reg - nregs) { 106 case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4; 107 case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4; 108 case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4; 109 } 110 return 0; 111 } 112 113 static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg) 114 { 115 switch (reg) { 116 case 0 ... 
31: 117 /* 128 bit FP register */ 118 { 119 uint64_t *q = aa64_vfp_qreg(env, reg); 120 stq_le_p(buf, q[0]); 121 stq_le_p(buf + 8, q[1]); 122 return 16; 123 } 124 case 32: 125 /* FPSR */ 126 stl_p(buf, vfp_get_fpsr(env)); 127 return 4; 128 case 33: 129 /* FPCR */ 130 stl_p(buf, vfp_get_fpcr(env)); 131 return 4; 132 default: 133 return 0; 134 } 135 } 136 137 static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) 138 { 139 switch (reg) { 140 case 0 ... 31: 141 /* 128 bit FP register */ 142 { 143 uint64_t *q = aa64_vfp_qreg(env, reg); 144 q[0] = ldq_le_p(buf); 145 q[1] = ldq_le_p(buf + 8); 146 return 16; 147 } 148 case 32: 149 /* FPSR */ 150 vfp_set_fpsr(env, ldl_p(buf)); 151 return 4; 152 case 33: 153 /* FPCR */ 154 vfp_set_fpcr(env, ldl_p(buf)); 155 return 4; 156 default: 157 return 0; 158 } 159 } 160 161 static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri) 162 { 163 assert(ri->fieldoffset); 164 if (cpreg_field_is_64bit(ri)) { 165 return CPREG_FIELD64(env, ri); 166 } else { 167 return CPREG_FIELD32(env, ri); 168 } 169 } 170 171 static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, 172 uint64_t value) 173 { 174 assert(ri->fieldoffset); 175 if (cpreg_field_is_64bit(ri)) { 176 CPREG_FIELD64(env, ri) = value; 177 } else { 178 CPREG_FIELD32(env, ri) = value; 179 } 180 } 181 182 static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri) 183 { 184 return (char *)env + ri->fieldoffset; 185 } 186 187 uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri) 188 { 189 /* Raw read of a coprocessor register (as needed for migration, etc). */ 190 if (ri->type & ARM_CP_CONST) { 191 return ri->resetvalue; 192 } else if (ri->raw_readfn) { 193 return ri->raw_readfn(env, ri); 194 } else if (ri->readfn) { 195 return ri->readfn(env, ri); 196 } else { 197 return raw_read(env, ri); 198 } 199 } 200 201 static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri, 202 uint64_t v) 203 { 204 /* Raw write of a coprocessor register (as needed for migration, etc). 205 * Note that constant registers are treated as write-ignored; the 206 * caller should check for success by whether a readback gives the 207 * value written. 208 */ 209 if (ri->type & ARM_CP_CONST) { 210 return; 211 } else if (ri->raw_writefn) { 212 ri->raw_writefn(env, ri, v); 213 } else if (ri->writefn) { 214 ri->writefn(env, ri, v); 215 } else { 216 raw_write(env, ri, v); 217 } 218 } 219 220 static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg) 221 { 222 ARMCPU *cpu = arm_env_get_cpu(env); 223 const ARMCPRegInfo *ri; 224 uint32_t key; 225 226 key = cpu->dyn_xml.cpregs_keys[reg]; 227 ri = get_arm_cp_reginfo(cpu->cp_regs, key); 228 if (ri) { 229 if (cpreg_field_is_64bit(ri)) { 230 return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri)); 231 } else { 232 return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri)); 233 } 234 } 235 return 0; 236 } 237 238 static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg) 239 { 240 return 0; 241 } 242 243 static bool raw_accessors_invalid(const ARMCPRegInfo *ri) 244 { 245 /* Return true if the regdef would cause an assertion if you called 246 * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a 247 * program bug for it not to have the NO_RAW flag). 
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

bool write_cpustate_to_list(ARMCPU *cpu)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (env->cp15.hcr_el2 & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (env->cp15.hcr_el2 & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (env->cp15.hcr_el2 & HCR_TGE);

    if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush(CPU(cpu));
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush(CPU(cpu));
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_S12NSE1 |
                        ARMMMUIdxBit_S12NSE0 |
                        ARMMMUIdxBit_S2NS);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_S12NSE1 |
                                        ARMMMUIdxBit_S12NSE0 |
                                        ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
739 */ 740 { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH, 741 .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1, 742 .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS, 743 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]), 744 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, }, 745 { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32, 746 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1, 747 .access = PL1_RW, .secure = ARM_CP_SECSTATE_S, 748 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s), 749 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, }, 750 REGINFO_SENTINEL 751 }; 752 753 static const ARMCPRegInfo not_v8_cp_reginfo[] = { 754 /* NB: Some of these registers exist in v8 but with more precise 755 * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]). 756 */ 757 /* MMU Domain access control / MPU write buffer control */ 758 { .name = "DACR", 759 .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY, 760 .access = PL1_RW, .resetvalue = 0, 761 .writefn = dacr_write, .raw_writefn = raw_write, 762 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), 763 offsetoflow32(CPUARMState, cp15.dacr_ns) } }, 764 /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs. 765 * For v6 and v5, these mappings are overly broad. 766 */ 767 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0, 768 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 769 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1, 770 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 771 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4, 772 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 773 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8, 774 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 775 /* Cache maintenance ops; some of this space may be overridden later. */ 776 { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, 777 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, 778 .type = ARM_CP_NOP | ARM_CP_OVERRIDE }, 779 REGINFO_SENTINEL 780 }; 781 782 static const ARMCPRegInfo not_v6_cp_reginfo[] = { 783 /* Not all pre-v6 cores implemented this WFI, so this is slightly 784 * over-broad. 785 */ 786 { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2, 787 .access = PL1_W, .type = ARM_CP_WFI }, 788 REGINFO_SENTINEL 789 }; 790 791 static const ARMCPRegInfo not_v7_cp_reginfo[] = { 792 /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which 793 * is UNPREDICTABLE; we choose to NOP as most implementations do). 794 */ 795 { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, 796 .access = PL1_W, .type = ARM_CP_WFI }, 797 /* L1 cache lockdown. Not architectural in v6 and earlier but in practice 798 * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and 799 * OMAPCP will override this space. 
800 */ 801 { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0, 802 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data), 803 .resetvalue = 0 }, 804 { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1, 805 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn), 806 .resetvalue = 0 }, 807 /* v6 doesn't have the cache ID registers but Linux reads them anyway */ 808 { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY, 809 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 810 .resetvalue = 0 }, 811 /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR; 812 * implementing it as RAZ means the "debug architecture version" bits 813 * will read as a reserved value, which should cause Linux to not try 814 * to use the debug hardware. 815 */ 816 { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, 817 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 818 /* MMU TLB control. Note that the wildcarding means we cover not just 819 * the unified TLB ops but also the dside/iside/inner-shareable variants. 820 */ 821 { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY, 822 .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write, 823 .type = ARM_CP_NO_RAW }, 824 { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY, 825 .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write, 826 .type = ARM_CP_NO_RAW }, 827 { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY, 828 .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write, 829 .type = ARM_CP_NO_RAW }, 830 { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY, 831 .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write, 832 .type = ARM_CP_NO_RAW }, 833 { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2, 834 .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP }, 835 { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2, 836 .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP }, 837 REGINFO_SENTINEL 838 }; 839 840 static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, 841 uint64_t value) 842 { 843 uint32_t mask = 0; 844 845 /* In ARMv8 most bits of CPACR_EL1 are RES0. */ 846 if (!arm_feature(env, ARM_FEATURE_V8)) { 847 /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI. 848 * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP. 849 * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell. 850 */ 851 if (arm_feature(env, ARM_FEATURE_VFP)) { 852 /* VFP coprocessor: cp10 & cp11 [23:20] */ 853 mask |= (1 << 31) | (1 << 30) | (0xf << 20); 854 855 if (!arm_feature(env, ARM_FEATURE_NEON)) { 856 /* ASEDIS [31] bit is RAO/WI */ 857 value |= (1 << 31); 858 } 859 860 /* VFPv3 and upwards with NEON implement 32 double precision 861 * registers (D0-D31). 862 */ 863 if (!arm_feature(env, ARM_FEATURE_NEON) || 864 !arm_feature(env, ARM_FEATURE_VFP3)) { 865 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */ 866 value |= (1 << 30); 867 } 868 } 869 value &= mask; 870 } 871 env->cp15.cpacr_el1 = value; 872 } 873 874 static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri) 875 { 876 /* Call cpacr_write() so that we reset with the correct RAO bits set 877 * for our CPU features. 
878 */ 879 cpacr_write(env, ri, 0); 880 } 881 882 static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri, 883 bool isread) 884 { 885 if (arm_feature(env, ARM_FEATURE_V8)) { 886 /* Check if CPACR accesses are to be trapped to EL2 */ 887 if (arm_current_el(env) == 1 && 888 (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) { 889 return CP_ACCESS_TRAP_EL2; 890 /* Check if CPACR accesses are to be trapped to EL3 */ 891 } else if (arm_current_el(env) < 3 && 892 (env->cp15.cptr_el[3] & CPTR_TCPAC)) { 893 return CP_ACCESS_TRAP_EL3; 894 } 895 } 896 897 return CP_ACCESS_OK; 898 } 899 900 static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri, 901 bool isread) 902 { 903 /* Check if CPTR accesses are set to trap to EL3 */ 904 if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) { 905 return CP_ACCESS_TRAP_EL3; 906 } 907 908 return CP_ACCESS_OK; 909 } 910 911 static const ARMCPRegInfo v6_cp_reginfo[] = { 912 /* prefetch by MVA in v6, NOP in v7 */ 913 { .name = "MVA_prefetch", 914 .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1, 915 .access = PL1_W, .type = ARM_CP_NOP }, 916 /* We need to break the TB after ISB to execute self-modifying code 917 * correctly and also to take any pending interrupts immediately. 918 * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag. 919 */ 920 { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4, 921 .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore }, 922 { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4, 923 .access = PL0_W, .type = ARM_CP_NOP }, 924 { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5, 925 .access = PL0_W, .type = ARM_CP_NOP }, 926 { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2, 927 .access = PL1_RW, 928 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s), 929 offsetof(CPUARMState, cp15.ifar_ns) }, 930 .resetvalue = 0, }, 931 /* Watchpoint Fault Address Register : should actually only be present 932 * for 1136, 1176, 11MPCore. 933 */ 934 { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1, 935 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, }, 936 { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, 937 .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access, 938 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1), 939 .resetfn = cpacr_reset, .writefn = cpacr_write }, 940 REGINFO_SENTINEL 941 }; 942 943 /* Definitions for the PMU registers */ 944 #define PMCRN_MASK 0xf800 945 #define PMCRN_SHIFT 11 946 #define PMCRD 0x8 947 #define PMCRC 0x4 948 #define PMCRE 0x1 949 950 static inline uint32_t pmu_num_counters(CPUARMState *env) 951 { 952 return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT; 953 } 954 955 /* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */ 956 static inline uint64_t pmu_counter_mask(CPUARMState *env) 957 { 958 return (1 << 31) | ((1 << pmu_num_counters(env)) - 1); 959 } 960 961 static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri, 962 bool isread) 963 { 964 /* Performance monitor registers user accessibility is controlled 965 * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable 966 * trapping to EL2 or EL3 for other accesses. 
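
/* Worked example (illustrative commentary, not part of the original source):
 * pmu_num_counters() extracts PMCR.N from bits [15:11] of PMCR. If PMCR.N
 * reads as 4, pmu_counter_mask() returns (1 << 31) | ((1 << 4) - 1), i.e.
 * 0x8000000f: the cycle-counter enable bit plus one bit per implemented
 * event counter. Writes to PMCNTEN*/PMINTEN* below are masked with this
 * value so that bits for unimplemented counters remain RAZ/WI.
 */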

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

#ifndef CONFIG_USER_ONLY

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static inline bool arm_ccnt_enabled(CPUARMState *env)
{
    /* This does not support checking PMCCFILTR_EL0 register */

    if (!(env->cp15.c9_pmcr & PMCRE) || !(env->cp15.c9_pmcnten & (1 << 31))) {
        return false;
    }

    return true;
}

void pmccntr_sync(CPUARMState *env)
{
    uint64_t temp_ticks;

    temp_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                          ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        temp_ticks /= 64;
    }

    if (arm_ccnt_enabled(env)) {
        env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
    }
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmccntr_sync(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmccntr_sync(env);
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, do not change value */
        return env->cp15.c15_ccnt;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    return total_ticks - env->cp15.c15_ccnt;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, set the absolute value */
        env->cp15.c15_ccnt = value;
        return;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    env->cp15.c15_ccnt = total_ticks - value;
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

#else /* CONFIG_USER_ONLY */

void pmccntr_sync(CPUARMState *env)
{
}

#endif

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_sync(env);
    env->cp15.pmccfiltr_el0 = value & 0xfc000000;
    pmccntr_sync(env);
}

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    env->cp15.c9_pmovsr &= ~value;
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
    if (env->cp15.c9_pmselr == 0x1f) {
        pmccfiltr_write(env, ri, value);
    }
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
     * are CONSTRAINED UNPREDICTABLE. See comments in pmxevtyper_write().
     */
    if (env->cp15.c9_pmselr == 0x1f) {
        return env->cp15.pmccfiltr_el0;
    } else {
        return 0;
    }
}

static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
}

static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}

static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* We only mask off bits that are RES0 both for AArch64 and AArch32.
     * For bits that vary between AArch32/64, code needs to check the
     * current execution mode before directly using the feature bit.
     */
    uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits. */
    value &= valid_mask;
    raw_write(env, ri, value);
}

static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}

static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t ret = 0;

    if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
        ret |= CPSR_I;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
        ret |= CPSR_F;
    }
    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}

static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow (although we don't actually implement any counters)
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    /* Unimplemented so WI. */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc, .type = ARM_CP_NOP },
#ifndef CONFIG_USER_ONLY
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write},
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write, },
#endif
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    /* Unimplemented, RAZ/WI. */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access_xevcntr },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};

static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);

    switch (el) {
    case 0:
        if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 0, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
     * EL0[PV]TEN is zero.
     */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 1, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            return CP_ACCESS_TRAP;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_TRAP;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}

static uint64_t gt_get_countervalue(CPUARMState *env)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
}

static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / GTIMER_SCALE) {
            nexttick = INT64_MAX / GTIMER_SCALE;
        }
        timer_mod(cpu->gt_timer[timeridx], nexttick);
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}
env->cp15.cntvoff_el2 : 0; 1811 1812 trace_arm_gt_tval_write(timeridx, value); 1813 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset + 1814 sextract64(value, 0, 32); 1815 gt_recalc_timer(arm_env_get_cpu(env), timeridx); 1816 } 1817 1818 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 1819 int timeridx, 1820 uint64_t value) 1821 { 1822 ARMCPU *cpu = arm_env_get_cpu(env); 1823 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; 1824 1825 trace_arm_gt_ctl_write(timeridx, value); 1826 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value); 1827 if ((oldval ^ value) & 1) { 1828 /* Enable toggled */ 1829 gt_recalc_timer(cpu, timeridx); 1830 } else if ((oldval ^ value) & 2) { 1831 /* IMASK toggled: don't need to recalculate, 1832 * just set the interrupt line based on ISTATUS 1833 */ 1834 int irqstate = (oldval & 4) && !(value & 2); 1835 1836 trace_arm_gt_imask_toggle(timeridx, irqstate); 1837 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); 1838 } 1839 } 1840 1841 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 1842 { 1843 gt_timer_reset(env, ri, GTIMER_PHYS); 1844 } 1845 1846 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1847 uint64_t value) 1848 { 1849 gt_cval_write(env, ri, GTIMER_PHYS, value); 1850 } 1851 1852 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 1853 { 1854 return gt_tval_read(env, ri, GTIMER_PHYS); 1855 } 1856 1857 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1858 uint64_t value) 1859 { 1860 gt_tval_write(env, ri, GTIMER_PHYS, value); 1861 } 1862 1863 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 1864 uint64_t value) 1865 { 1866 gt_ctl_write(env, ri, GTIMER_PHYS, value); 1867 } 1868 1869 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 1870 { 1871 gt_timer_reset(env, ri, GTIMER_VIRT); 1872 } 1873 1874 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1875 uint64_t value) 1876 { 1877 gt_cval_write(env, ri, GTIMER_VIRT, value); 1878 } 1879 1880 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 1881 { 1882 return gt_tval_read(env, ri, GTIMER_VIRT); 1883 } 1884 1885 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1886 uint64_t value) 1887 { 1888 gt_tval_write(env, ri, GTIMER_VIRT, value); 1889 } 1890 1891 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 1892 uint64_t value) 1893 { 1894 gt_ctl_write(env, ri, GTIMER_VIRT, value); 1895 } 1896 1897 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri, 1898 uint64_t value) 1899 { 1900 ARMCPU *cpu = arm_env_get_cpu(env); 1901 1902 trace_arm_gt_cntvoff_write(value); 1903 raw_write(env, ri, value); 1904 gt_recalc_timer(cpu, GTIMER_VIRT); 1905 } 1906 1907 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 1908 { 1909 gt_timer_reset(env, ri, GTIMER_HYP); 1910 } 1911 1912 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1913 uint64_t value) 1914 { 1915 gt_cval_write(env, ri, GTIMER_HYP, value); 1916 } 1917 1918 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 1919 { 1920 return gt_tval_read(env, ri, GTIMER_HYP); 1921 } 1922 1923 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1924 uint64_t value) 1925 { 1926 gt_tval_write(env, ri, GTIMER_HYP, value); 1927 } 1928 1929 static void gt_hyp_ctl_write(CPUARMState *env, const 
ARMCPRegInfo *ri, 1930 uint64_t value) 1931 { 1932 gt_ctl_write(env, ri, GTIMER_HYP, value); 1933 } 1934 1935 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 1936 { 1937 gt_timer_reset(env, ri, GTIMER_SEC); 1938 } 1939 1940 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1941 uint64_t value) 1942 { 1943 gt_cval_write(env, ri, GTIMER_SEC, value); 1944 } 1945 1946 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 1947 { 1948 return gt_tval_read(env, ri, GTIMER_SEC); 1949 } 1950 1951 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1952 uint64_t value) 1953 { 1954 gt_tval_write(env, ri, GTIMER_SEC, value); 1955 } 1956 1957 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 1958 uint64_t value) 1959 { 1960 gt_ctl_write(env, ri, GTIMER_SEC, value); 1961 } 1962 1963 void arm_gt_ptimer_cb(void *opaque) 1964 { 1965 ARMCPU *cpu = opaque; 1966 1967 gt_recalc_timer(cpu, GTIMER_PHYS); 1968 } 1969 1970 void arm_gt_vtimer_cb(void *opaque) 1971 { 1972 ARMCPU *cpu = opaque; 1973 1974 gt_recalc_timer(cpu, GTIMER_VIRT); 1975 } 1976 1977 void arm_gt_htimer_cb(void *opaque) 1978 { 1979 ARMCPU *cpu = opaque; 1980 1981 gt_recalc_timer(cpu, GTIMER_HYP); 1982 } 1983 1984 void arm_gt_stimer_cb(void *opaque) 1985 { 1986 ARMCPU *cpu = opaque; 1987 1988 gt_recalc_timer(cpu, GTIMER_SEC); 1989 } 1990 1991 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 1992 /* Note that CNTFRQ is purely reads-as-written for the benefit 1993 * of software; writing it doesn't actually change the timer frequency. 1994 * Our reset value matches the fixed frequency we implement the timer at. 1995 */ 1996 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0, 1997 .type = ARM_CP_ALIAS, 1998 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 1999 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq), 2000 }, 2001 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, 2002 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, 2003 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 2004 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), 2005 .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE, 2006 }, 2007 /* overall control: mostly access permissions */ 2008 { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH, 2009 .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0, 2010 .access = PL1_RW, 2011 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl), 2012 .resetvalue = 0, 2013 }, 2014 /* per-timer control */ 2015 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 2016 .secure = ARM_CP_SECSTATE_NS, 2017 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R, 2018 .accessfn = gt_ptimer_access, 2019 .fieldoffset = offsetoflow32(CPUARMState, 2020 cp15.c14_timer[GTIMER_PHYS].ctl), 2021 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write, 2022 }, 2023 { .name = "CNTP_CTL_S", 2024 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 2025 .secure = ARM_CP_SECSTATE_S, 2026 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R, 2027 .accessfn = gt_ptimer_access, 2028 .fieldoffset = offsetoflow32(CPUARMState, 2029 cp15.c14_timer[GTIMER_SEC].ctl), 2030 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 2031 }, 2032 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64, 2033 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1, 2034 .type = ARM_CP_IO, .access = PL1_RW | PL0_R, 2035 .accessfn = gt_ptimer_access, 2036 .fieldoffset = offsetof(CPUARMState, 
cp15.c14_timer[GTIMER_PHYS].ctl), 2037 .resetvalue = 0, 2038 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write, 2039 }, 2040 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1, 2041 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R, 2042 .accessfn = gt_vtimer_access, 2043 .fieldoffset = offsetoflow32(CPUARMState, 2044 cp15.c14_timer[GTIMER_VIRT].ctl), 2045 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write, 2046 }, 2047 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64, 2048 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1, 2049 .type = ARM_CP_IO, .access = PL1_RW | PL0_R, 2050 .accessfn = gt_vtimer_access, 2051 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), 2052 .resetvalue = 0, 2053 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write, 2054 }, 2055 /* TimerValue views: a 32 bit downcounting view of the underlying state */ 2056 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 2057 .secure = ARM_CP_SECSTATE_NS, 2058 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2059 .accessfn = gt_ptimer_access, 2060 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write, 2061 }, 2062 { .name = "CNTP_TVAL_S", 2063 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 2064 .secure = ARM_CP_SECSTATE_S, 2065 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2066 .accessfn = gt_ptimer_access, 2067 .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write, 2068 }, 2069 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64, 2070 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0, 2071 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2072 .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset, 2073 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write, 2074 }, 2075 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0, 2076 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2077 .accessfn = gt_vtimer_access, 2078 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write, 2079 }, 2080 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64, 2081 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0, 2082 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2083 .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset, 2084 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write, 2085 }, 2086 /* The counter itself */ 2087 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0, 2088 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 2089 .accessfn = gt_pct_access, 2090 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore, 2091 }, 2092 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64, 2093 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1, 2094 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2095 .accessfn = gt_pct_access, .readfn = gt_cnt_read, 2096 }, 2097 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1, 2098 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 2099 .accessfn = gt_vct_access, 2100 .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore, 2101 }, 2102 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, 2103 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, 2104 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2105 .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read, 2106 }, 2107 /* Comparison value, indicating when the timer goes off */ 2108 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2, 2109 .secure = ARM_CP_SECSTATE_NS, 2110 .access = PL1_RW | 
PL0_R, 2111 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 2112 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 2113 .accessfn = gt_ptimer_access, 2114 .writefn = gt_phys_cval_write, .raw_writefn = raw_write, 2115 }, 2116 { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2, 2117 .secure = ARM_CP_SECSTATE_S, 2118 .access = PL1_RW | PL0_R, 2119 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 2120 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), 2121 .accessfn = gt_ptimer_access, 2122 .writefn = gt_sec_cval_write, .raw_writefn = raw_write, 2123 }, 2124 { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64, 2125 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2, 2126 .access = PL1_RW | PL0_R, 2127 .type = ARM_CP_IO, 2128 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 2129 .resetvalue = 0, .accessfn = gt_ptimer_access, 2130 .writefn = gt_phys_cval_write, .raw_writefn = raw_write, 2131 }, 2132 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3, 2133 .access = PL1_RW | PL0_R, 2134 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 2135 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 2136 .accessfn = gt_vtimer_access, 2137 .writefn = gt_virt_cval_write, .raw_writefn = raw_write, 2138 }, 2139 { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64, 2140 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2, 2141 .access = PL1_RW | PL0_R, 2142 .type = ARM_CP_IO, 2143 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 2144 .resetvalue = 0, .accessfn = gt_vtimer_access, 2145 .writefn = gt_virt_cval_write, .raw_writefn = raw_write, 2146 }, 2147 /* Secure timer -- this is actually restricted to only EL3 2148 * and configurably Secure-EL1 via the accessfn. 2149 */ 2150 { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64, 2151 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0, 2152 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW, 2153 .accessfn = gt_stimer_access, 2154 .readfn = gt_sec_tval_read, 2155 .writefn = gt_sec_tval_write, 2156 .resetfn = gt_sec_timer_reset, 2157 }, 2158 { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64, 2159 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1, 2160 .type = ARM_CP_IO, .access = PL1_RW, 2161 .accessfn = gt_stimer_access, 2162 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl), 2163 .resetvalue = 0, 2164 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 2165 }, 2166 { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64, 2167 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2, 2168 .type = ARM_CP_IO, .access = PL1_RW, 2169 .accessfn = gt_stimer_access, 2170 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), 2171 .writefn = gt_sec_cval_write, .raw_writefn = raw_write, 2172 }, 2173 REGINFO_SENTINEL 2174 }; 2175 2176 #else 2177 2178 /* In user-mode most of the generic timer registers are inaccessible 2179 * however modern kernels (4.12+) allow access to cntvct_el0 2180 */ 2181 2182 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 2183 { 2184 /* Currently we have no support for QEMUTimer in linux-user so we 2185 * can't call gt_get_countervalue(env), instead we directly 2186 * call the lower level functions. 
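 * (cpu_get_clock() is the nanosecond clock that QEMU_CLOCK_VIRTUAL is
 * normally derived from when icount is not in use, so the counter still
 * advances at the rate advertised by CNTFRQ_EL0 below.)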
2187 */ 2188 return cpu_get_clock() / GTIMER_SCALE; 2189 } 2190 2191 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 2192 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, 2193 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, 2194 .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */, 2195 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), 2196 .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE, 2197 }, 2198 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, 2199 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, 2200 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2201 .readfn = gt_virt_cnt_read, 2202 }, 2203 REGINFO_SENTINEL 2204 }; 2205 2206 #endif 2207 2208 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 2209 { 2210 if (arm_feature(env, ARM_FEATURE_LPAE)) { 2211 raw_write(env, ri, value); 2212 } else if (arm_feature(env, ARM_FEATURE_V7)) { 2213 raw_write(env, ri, value & 0xfffff6ff); 2214 } else { 2215 raw_write(env, ri, value & 0xfffff1ff); 2216 } 2217 } 2218 2219 #ifndef CONFIG_USER_ONLY 2220 /* get_phys_addr() isn't present for user-mode-only targets */ 2221 2222 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri, 2223 bool isread) 2224 { 2225 if (ri->opc2 & 4) { 2226 /* The ATS12NSO* operations must trap to EL3 if executed in 2227 * Secure EL1 (which can only happen if EL3 is AArch64). 2228 * They are simply UNDEF if executed from NS EL1. 2229 * They function normally from EL2 or EL3. 2230 */ 2231 if (arm_current_el(env) == 1) { 2232 if (arm_is_secure_below_el3(env)) { 2233 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3; 2234 } 2235 return CP_ACCESS_TRAP_UNCATEGORIZED; 2236 } 2237 } 2238 return CP_ACCESS_OK; 2239 } 2240 2241 static uint64_t do_ats_write(CPUARMState *env, uint64_t value, 2242 MMUAccessType access_type, ARMMMUIdx mmu_idx) 2243 { 2244 hwaddr phys_addr; 2245 target_ulong page_size; 2246 int prot; 2247 bool ret; 2248 uint64_t par64; 2249 bool format64 = false; 2250 MemTxAttrs attrs = {}; 2251 ARMMMUFaultInfo fi = {}; 2252 ARMCacheAttrs cacheattrs = {}; 2253 2254 ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs, 2255 &prot, &page_size, &fi, &cacheattrs); 2256 2257 if (is_a64(env)) { 2258 format64 = true; 2259 } else if (arm_feature(env, ARM_FEATURE_LPAE)) { 2260 /* 2261 * ATS1Cxx: 2262 * * TTBCR.EAE determines whether the result is returned using the 2263 * 32-bit or the 64-bit PAR format 2264 * * Instructions executed in Hyp mode always use the 64bit format 2265 * 2266 * ATS1S2NSOxx uses the 64bit format if any of the following is true: 2267 * * The Non-secure TTBCR.EAE bit is set to 1 2268 * * The implementation includes EL2, and the value of HCR.VM is 1 2269 * 2270 * ATS1Hx always uses the 64bit format (not supported yet). 
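 * In summary: AArch64 always reports the 64-bit PAR format; for AArch32
 * the choice below follows the stage 1 translation regime, widened to the
 * 64-bit format for the stage 1+2 ATS12NSO* operations when HCR.VM is set,
 * or when the operation is performed from EL2.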
2271 */ 2272 format64 = arm_s1_regime_using_lpae_format(env, mmu_idx); 2273 2274 if (arm_feature(env, ARM_FEATURE_EL2)) { 2275 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) { 2276 format64 |= env->cp15.hcr_el2 & HCR_VM; 2277 } else { 2278 format64 |= arm_current_el(env) == 2; 2279 } 2280 } 2281 } 2282 2283 if (format64) { 2284 /* Create a 64-bit PAR */ 2285 par64 = (1 << 11); /* LPAE bit always set */ 2286 if (!ret) { 2287 par64 |= phys_addr & ~0xfffULL; 2288 if (!attrs.secure) { 2289 par64 |= (1 << 9); /* NS */ 2290 } 2291 par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */ 2292 par64 |= cacheattrs.shareability << 7; /* SH */ 2293 } else { 2294 uint32_t fsr = arm_fi_to_lfsc(&fi); 2295 2296 par64 |= 1; /* F */ 2297 par64 |= (fsr & 0x3f) << 1; /* FS */ 2298 /* Note that S2WLK and FSTAGE are always zero, because we don't 2299 * implement virtualization and therefore there can't be a stage 2 2300 * fault. 2301 */ 2302 } 2303 } else { 2304 /* fsr is a DFSR/IFSR value for the short descriptor 2305 * translation table format (with WnR always clear). 2306 * Convert it to a 32-bit PAR. 2307 */ 2308 if (!ret) { 2309 /* We do not set any attribute bits in the PAR */ 2310 if (page_size == (1 << 24) 2311 && arm_feature(env, ARM_FEATURE_V7)) { 2312 par64 = (phys_addr & 0xff000000) | (1 << 1); 2313 } else { 2314 par64 = phys_addr & 0xfffff000; 2315 } 2316 if (!attrs.secure) { 2317 par64 |= (1 << 9); /* NS */ 2318 } 2319 } else { 2320 uint32_t fsr = arm_fi_to_sfsc(&fi); 2321 2322 par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) | 2323 ((fsr & 0xf) << 1) | 1; 2324 } 2325 } 2326 return par64; 2327 } 2328 2329 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 2330 { 2331 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 2332 uint64_t par64; 2333 ARMMMUIdx mmu_idx; 2334 int el = arm_current_el(env); 2335 bool secure = arm_is_secure_below_el3(env); 2336 2337 switch (ri->opc2 & 6) { 2338 case 0: 2339 /* stage 1 current state PL1: ATS1CPR, ATS1CPW */ 2340 switch (el) { 2341 case 3: 2342 mmu_idx = ARMMMUIdx_S1E3; 2343 break; 2344 case 2: 2345 mmu_idx = ARMMMUIdx_S1NSE1; 2346 break; 2347 case 1: 2348 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1; 2349 break; 2350 default: 2351 g_assert_not_reached(); 2352 } 2353 break; 2354 case 2: 2355 /* stage 1 current state PL0: ATS1CUR, ATS1CUW */ 2356 switch (el) { 2357 case 3: 2358 mmu_idx = ARMMMUIdx_S1SE0; 2359 break; 2360 case 2: 2361 mmu_idx = ARMMMUIdx_S1NSE0; 2362 break; 2363 case 1: 2364 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0; 2365 break; 2366 default: 2367 g_assert_not_reached(); 2368 } 2369 break; 2370 case 4: 2371 /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */ 2372 mmu_idx = ARMMMUIdx_S12NSE1; 2373 break; 2374 case 6: 2375 /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */ 2376 mmu_idx = ARMMMUIdx_S12NSE0; 2377 break; 2378 default: 2379 g_assert_not_reached(); 2380 } 2381 2382 par64 = do_ats_write(env, value, access_type, mmu_idx); 2383 2384 A32_BANKED_CURRENT_REG_SET(env, par, par64); 2385 } 2386 2387 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri, 2388 uint64_t value) 2389 { 2390 MMUAccessType access_type = ri->opc2 & 1 ? 
MMU_DATA_STORE : MMU_DATA_LOAD; 2391 uint64_t par64; 2392 2393 par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS); 2394 2395 A32_BANKED_CURRENT_REG_SET(env, par, par64); 2396 } 2397 2398 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri, 2399 bool isread) 2400 { 2401 if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) { 2402 return CP_ACCESS_TRAP; 2403 } 2404 return CP_ACCESS_OK; 2405 } 2406 2407 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, 2408 uint64_t value) 2409 { 2410 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 2411 ARMMMUIdx mmu_idx; 2412 int secure = arm_is_secure_below_el3(env); 2413 2414 switch (ri->opc2 & 6) { 2415 case 0: 2416 switch (ri->opc1) { 2417 case 0: /* AT S1E1R, AT S1E1W */ 2418 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1; 2419 break; 2420 case 4: /* AT S1E2R, AT S1E2W */ 2421 mmu_idx = ARMMMUIdx_S1E2; 2422 break; 2423 case 6: /* AT S1E3R, AT S1E3W */ 2424 mmu_idx = ARMMMUIdx_S1E3; 2425 break; 2426 default: 2427 g_assert_not_reached(); 2428 } 2429 break; 2430 case 2: /* AT S1E0R, AT S1E0W */ 2431 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0; 2432 break; 2433 case 4: /* AT S12E1R, AT S12E1W */ 2434 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1; 2435 break; 2436 case 6: /* AT S12E0R, AT S12E0W */ 2437 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0; 2438 break; 2439 default: 2440 g_assert_not_reached(); 2441 } 2442 2443 env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx); 2444 } 2445 #endif 2446 2447 static const ARMCPRegInfo vapa_cp_reginfo[] = { 2448 { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0, 2449 .access = PL1_RW, .resetvalue = 0, 2450 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s), 2451 offsetoflow32(CPUARMState, cp15.par_ns) }, 2452 .writefn = par_write }, 2453 #ifndef CONFIG_USER_ONLY 2454 /* This underdecoding is safe because the reginfo is NO_RAW. */ 2455 { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY, 2456 .access = PL1_W, .accessfn = ats_access, 2457 .writefn = ats_write, .type = ARM_CP_NO_RAW }, 2458 #endif 2459 REGINFO_SENTINEL 2460 }; 2461 2462 /* Return basic MPU access permission bits. */ 2463 static uint32_t simple_mpu_ap_bits(uint32_t val) 2464 { 2465 uint32_t ret; 2466 uint32_t mask; 2467 int i; 2468 ret = 0; 2469 mask = 3; 2470 for (i = 0; i < 16; i += 2) { 2471 ret |= (val >> i) & mask; 2472 mask <<= 2; 2473 } 2474 return ret; 2475 } 2476 2477 /* Pad basic MPU access permission bits to extended format. 
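 * Each 2-bit AP field at bit position 2n of the simple format is moved to
 * bit position 4n of the extended format (the inverse of
 * simple_mpu_ap_bits() above); for example a simple value of 0x0006
 * expands to an extended value of 0x00000012.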
*/ 2478 static uint32_t extended_mpu_ap_bits(uint32_t val) 2479 { 2480 uint32_t ret; 2481 uint32_t mask; 2482 int i; 2483 ret = 0; 2484 mask = 3; 2485 for (i = 0; i < 16; i += 2) { 2486 ret |= (val & mask) << i; 2487 mask <<= 2; 2488 } 2489 return ret; 2490 } 2491 2492 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 2493 uint64_t value) 2494 { 2495 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value); 2496 } 2497 2498 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 2499 { 2500 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap); 2501 } 2502 2503 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 2504 uint64_t value) 2505 { 2506 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value); 2507 } 2508 2509 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 2510 { 2511 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap); 2512 } 2513 2514 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri) 2515 { 2516 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 2517 2518 if (!u32p) { 2519 return 0; 2520 } 2521 2522 u32p += env->pmsav7.rnr[M_REG_NS]; 2523 return *u32p; 2524 } 2525 2526 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri, 2527 uint64_t value) 2528 { 2529 ARMCPU *cpu = arm_env_get_cpu(env); 2530 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 2531 2532 if (!u32p) { 2533 return; 2534 } 2535 2536 u32p += env->pmsav7.rnr[M_REG_NS]; 2537 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ 2538 *u32p = value; 2539 } 2540 2541 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2542 uint64_t value) 2543 { 2544 ARMCPU *cpu = arm_env_get_cpu(env); 2545 uint32_t nrgs = cpu->pmsav7_dregion; 2546 2547 if (value >= nrgs) { 2548 qemu_log_mask(LOG_GUEST_ERROR, 2549 "PMSAv7 RGNR write >= # supported regions, %" PRIu32 2550 " > %" PRIu32 "\n", (uint32_t)value, nrgs); 2551 return; 2552 } 2553 2554 raw_write(env, ri, value); 2555 } 2556 2557 static const ARMCPRegInfo pmsav7_cp_reginfo[] = { 2558 /* Reset for all these registers is handled in arm_cpu_reset(), 2559 * because the PMSAv7 is also used by M-profile CPUs, which do 2560 * not register cpregs but still need the state to be reset. 
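 * (DRBAR/DRSR/DRACR below are per-region: their fieldoffsets point at
 * arrays with one element per implemented region, which the read/write
 * functions index with the current RGNR value.)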
2561 */ 2562 { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0, 2563 .access = PL1_RW, .type = ARM_CP_NO_RAW, 2564 .fieldoffset = offsetof(CPUARMState, pmsav7.drbar), 2565 .readfn = pmsav7_read, .writefn = pmsav7_write, 2566 .resetfn = arm_cp_reset_ignore }, 2567 { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2, 2568 .access = PL1_RW, .type = ARM_CP_NO_RAW, 2569 .fieldoffset = offsetof(CPUARMState, pmsav7.drsr), 2570 .readfn = pmsav7_read, .writefn = pmsav7_write, 2571 .resetfn = arm_cp_reset_ignore }, 2572 { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4, 2573 .access = PL1_RW, .type = ARM_CP_NO_RAW, 2574 .fieldoffset = offsetof(CPUARMState, pmsav7.dracr), 2575 .readfn = pmsav7_read, .writefn = pmsav7_write, 2576 .resetfn = arm_cp_reset_ignore }, 2577 { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0, 2578 .access = PL1_RW, 2579 .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]), 2580 .writefn = pmsav7_rgnr_write, 2581 .resetfn = arm_cp_reset_ignore }, 2582 REGINFO_SENTINEL 2583 }; 2584 2585 static const ARMCPRegInfo pmsav5_cp_reginfo[] = { 2586 { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 2587 .access = PL1_RW, .type = ARM_CP_ALIAS, 2588 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 2589 .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, }, 2590 { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 2591 .access = PL1_RW, .type = ARM_CP_ALIAS, 2592 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 2593 .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, }, 2594 { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2, 2595 .access = PL1_RW, 2596 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 2597 .resetvalue = 0, }, 2598 { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3, 2599 .access = PL1_RW, 2600 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 2601 .resetvalue = 0, }, 2602 { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 2603 .access = PL1_RW, 2604 .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, }, 2605 { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1, 2606 .access = PL1_RW, 2607 .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, }, 2608 /* Protection region base and size registers */ 2609 { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, 2610 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2611 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) }, 2612 { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0, 2613 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2614 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) }, 2615 { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0, 2616 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2617 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) }, 2618 { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0, 2619 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2620 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) }, 2621 { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0, 2622 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2623 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) }, 2624 { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0, 2625 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2626 .fieldoffset 
= offsetof(CPUARMState, cp15.c6_region[5]) }, 2627 { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0, 2628 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2629 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) }, 2630 { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0, 2631 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2632 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) }, 2633 REGINFO_SENTINEL 2634 }; 2635 2636 static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri, 2637 uint64_t value) 2638 { 2639 TCR *tcr = raw_ptr(env, ri); 2640 int maskshift = extract32(value, 0, 3); 2641 2642 if (!arm_feature(env, ARM_FEATURE_V8)) { 2643 if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) { 2644 /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when 2645 * using Long-desciptor translation table format */ 2646 value &= ~((7 << 19) | (3 << 14) | (0xf << 3)); 2647 } else if (arm_feature(env, ARM_FEATURE_EL3)) { 2648 /* In an implementation that includes the Security Extensions 2649 * TTBCR has additional fields PD0 [4] and PD1 [5] for 2650 * Short-descriptor translation table format. 2651 */ 2652 value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N; 2653 } else { 2654 value &= TTBCR_N; 2655 } 2656 } 2657 2658 /* Update the masks corresponding to the TCR bank being written 2659 * Note that we always calculate mask and base_mask, but 2660 * they are only used for short-descriptor tables (ie if EAE is 0); 2661 * for long-descriptor tables the TCR fields are used differently 2662 * and the mask and base_mask values are meaningless. 2663 */ 2664 tcr->raw_tcr = value; 2665 tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift); 2666 tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift); 2667 } 2668 2669 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2670 uint64_t value) 2671 { 2672 ARMCPU *cpu = arm_env_get_cpu(env); 2673 2674 if (arm_feature(env, ARM_FEATURE_LPAE)) { 2675 /* With LPAE the TTBCR could result in a change of ASID 2676 * via the TTBCR.A1 bit, so do a TLB flush. 2677 */ 2678 tlb_flush(CPU(cpu)); 2679 } 2680 vmsa_ttbcr_raw_write(env, ri, value); 2681 } 2682 2683 static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2684 { 2685 TCR *tcr = raw_ptr(env, ri); 2686 2687 /* Reset both the TCR as well as the masks corresponding to the bank of 2688 * the TCR being reset. 2689 */ 2690 tcr->raw_tcr = 0; 2691 tcr->mask = 0; 2692 tcr->base_mask = 0xffffc000u; 2693 } 2694 2695 static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri, 2696 uint64_t value) 2697 { 2698 ARMCPU *cpu = arm_env_get_cpu(env); 2699 TCR *tcr = raw_ptr(env, ri); 2700 2701 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */ 2702 tlb_flush(CPU(cpu)); 2703 tcr->raw_tcr = value; 2704 } 2705 2706 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2707 uint64_t value) 2708 { 2709 /* 64 bit accesses to the TTBRs can change the ASID and so we 2710 * must flush the TLB. 2711 */ 2712 if (cpreg_field_is_64bit(ri)) { 2713 ARMCPU *cpu = arm_env_get_cpu(env); 2714 2715 tlb_flush(CPU(cpu)); 2716 } 2717 raw_write(env, ri, value); 2718 } 2719 2720 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2721 uint64_t value) 2722 { 2723 ARMCPU *cpu = arm_env_get_cpu(env); 2724 CPUState *cs = CPU(cpu); 2725 2726 /* Accesses to VTTBR may change the VMID so we must flush the TLB. 
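 * (All Non-secure EL1&0 and stage 2 TLB entries are dropped, since those
 * are the translations tagged with the VMID.)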
*/ 2727 if (raw_read(env, ri) != value) { 2728 tlb_flush_by_mmuidx(cs, 2729 ARMMMUIdxBit_S12NSE1 | 2730 ARMMMUIdxBit_S12NSE0 | 2731 ARMMMUIdxBit_S2NS); 2732 raw_write(env, ri, value); 2733 } 2734 } 2735 2736 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = { 2737 { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 2738 .access = PL1_RW, .type = ARM_CP_ALIAS, 2739 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s), 2740 offsetoflow32(CPUARMState, cp15.dfsr_ns) }, }, 2741 { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 2742 .access = PL1_RW, .resetvalue = 0, 2743 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s), 2744 offsetoflow32(CPUARMState, cp15.ifsr_ns) } }, 2745 { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0, 2746 .access = PL1_RW, .resetvalue = 0, 2747 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s), 2748 offsetof(CPUARMState, cp15.dfar_ns) } }, 2749 { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64, 2750 .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0, 2751 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]), 2752 .resetvalue = 0, }, 2753 REGINFO_SENTINEL 2754 }; 2755 2756 static const ARMCPRegInfo vmsa_cp_reginfo[] = { 2757 { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64, 2758 .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0, 2759 .access = PL1_RW, 2760 .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, }, 2761 { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH, 2762 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0, 2763 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0, 2764 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 2765 offsetof(CPUARMState, cp15.ttbr0_ns) } }, 2766 { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH, 2767 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1, 2768 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0, 2769 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 2770 offsetof(CPUARMState, cp15.ttbr1_ns) } }, 2771 { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64, 2772 .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 2773 .access = PL1_RW, .writefn = vmsa_tcr_el1_write, 2774 .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write, 2775 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) }, 2776 { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 2777 .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write, 2778 .raw_writefn = vmsa_ttbcr_raw_write, 2779 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]), 2780 offsetoflow32(CPUARMState, cp15.tcr_el[1])} }, 2781 REGINFO_SENTINEL 2782 }; 2783 2784 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri, 2785 uint64_t value) 2786 { 2787 env->cp15.c15_ticonfig = value & 0xe7; 2788 /* The OS_TYPE bit in this register changes the reported CPUID! */ 2789 env->cp15.c0_cpuid = (value & (1 << 5)) ? 
2790 ARM_CPUID_TI915T : ARM_CPUID_TI925T; 2791 } 2792 2793 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri, 2794 uint64_t value) 2795 { 2796 env->cp15.c15_threadid = value & 0xffff; 2797 } 2798 2799 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri, 2800 uint64_t value) 2801 { 2802 /* Wait-for-interrupt (deprecated) */ 2803 cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT); 2804 } 2805 2806 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri, 2807 uint64_t value) 2808 { 2809 /* On OMAP there are registers indicating the max/min index of dcache lines 2810 * containing a dirty line; cache flush operations have to reset these. 2811 */ 2812 env->cp15.c15_i_max = 0x000; 2813 env->cp15.c15_i_min = 0xff0; 2814 } 2815 2816 static const ARMCPRegInfo omap_cp_reginfo[] = { 2817 { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY, 2818 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE, 2819 .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]), 2820 .resetvalue = 0, }, 2821 { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0, 2822 .access = PL1_RW, .type = ARM_CP_NOP }, 2823 { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, 2824 .access = PL1_RW, 2825 .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0, 2826 .writefn = omap_ticonfig_write }, 2827 { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0, 2828 .access = PL1_RW, 2829 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, }, 2830 { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0, 2831 .access = PL1_RW, .resetvalue = 0xff0, 2832 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) }, 2833 { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0, 2834 .access = PL1_RW, 2835 .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0, 2836 .writefn = omap_threadid_write }, 2837 { .name = "TI925T_STATUS", .cp = 15, .crn = 15, 2838 .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 2839 .type = ARM_CP_NO_RAW, 2840 .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, }, 2841 /* TODO: Peripheral port remap register: 2842 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller 2843 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff), 2844 * when MMU is off. 
2845 */ 2846 { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, 2847 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, 2848 .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW, 2849 .writefn = omap_cachemaint_write }, 2850 { .name = "C9", .cp = 15, .crn = 9, 2851 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, 2852 .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 }, 2853 REGINFO_SENTINEL 2854 }; 2855 2856 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri, 2857 uint64_t value) 2858 { 2859 env->cp15.c15_cpar = value & 0x3fff; 2860 } 2861 2862 static const ARMCPRegInfo xscale_cp_reginfo[] = { 2863 { .name = "XSCALE_CPAR", 2864 .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 2865 .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0, 2866 .writefn = xscale_cpar_write, }, 2867 { .name = "XSCALE_AUXCR", 2868 .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, 2869 .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr), 2870 .resetvalue = 0, }, 2871 /* XScale specific cache-lockdown: since we have no cache we NOP these 2872 * and hope the guest does not really rely on cache behaviour. 2873 */ 2874 { .name = "XSCALE_LOCK_ICACHE_LINE", 2875 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0, 2876 .access = PL1_W, .type = ARM_CP_NOP }, 2877 { .name = "XSCALE_UNLOCK_ICACHE", 2878 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1, 2879 .access = PL1_W, .type = ARM_CP_NOP }, 2880 { .name = "XSCALE_DCACHE_LOCK", 2881 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0, 2882 .access = PL1_RW, .type = ARM_CP_NOP }, 2883 { .name = "XSCALE_UNLOCK_DCACHE", 2884 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1, 2885 .access = PL1_W, .type = ARM_CP_NOP }, 2886 REGINFO_SENTINEL 2887 }; 2888 2889 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = { 2890 /* RAZ/WI the whole crn=15 space, when we don't have a more specific 2891 * implementation of this implementation-defined space. 2892 * Ideally this should eventually disappear in favour of actually 2893 * implementing the correct behaviour for all cores. 
2894 */ 2895 { .name = "C15_IMPDEF", .cp = 15, .crn = 15, 2896 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, 2897 .access = PL1_RW, 2898 .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE, 2899 .resetvalue = 0 }, 2900 REGINFO_SENTINEL 2901 }; 2902 2903 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = { 2904 /* Cache status: RAZ because we have no cache so it's always clean */ 2905 { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6, 2906 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 2907 .resetvalue = 0 }, 2908 REGINFO_SENTINEL 2909 }; 2910 2911 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = { 2912 /* We never have a a block transfer operation in progress */ 2913 { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4, 2914 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 2915 .resetvalue = 0 }, 2916 /* The cache ops themselves: these all NOP for QEMU */ 2917 { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0, 2918 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 2919 { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0, 2920 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 2921 { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0, 2922 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 2923 { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1, 2924 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 2925 { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2, 2926 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 2927 { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0, 2928 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 2929 REGINFO_SENTINEL 2930 }; 2931 2932 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = { 2933 /* The cache test-and-clean instructions always return (1 << 30) 2934 * to indicate that there are no dirty cache lines. 2935 */ 2936 { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3, 2937 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 2938 .resetvalue = (1 << 30) }, 2939 { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3, 2940 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 2941 .resetvalue = (1 << 30) }, 2942 REGINFO_SENTINEL 2943 }; 2944 2945 static const ARMCPRegInfo strongarm_cp_reginfo[] = { 2946 /* Ignore ReadBuffer accesses */ 2947 { .name = "C9_READBUFFER", .cp = 15, .crn = 9, 2948 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, 2949 .access = PL1_RW, .resetvalue = 0, 2950 .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW }, 2951 REGINFO_SENTINEL 2952 }; 2953 2954 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2955 { 2956 ARMCPU *cpu = arm_env_get_cpu(env); 2957 unsigned int cur_el = arm_current_el(env); 2958 bool secure = arm_is_secure(env); 2959 2960 if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) { 2961 return env->cp15.vpidr_el2; 2962 } 2963 return raw_read(env, ri); 2964 } 2965 2966 static uint64_t mpidr_read_val(CPUARMState *env) 2967 { 2968 ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env)); 2969 uint64_t mpidr = cpu->mp_affinity; 2970 2971 if (arm_feature(env, ARM_FEATURE_V7MP)) { 2972 mpidr |= (1U << 31); 2973 /* Cores which are uniprocessor (non-coherent) 2974 * but still implement the MP extensions set 2975 * bit 30. (For instance, Cortex-R5). 
2976 */ 2977 if (cpu->mp_is_up) { 2978 mpidr |= (1u << 30); 2979 } 2980 } 2981 return mpidr; 2982 } 2983 2984 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2985 { 2986 unsigned int cur_el = arm_current_el(env); 2987 bool secure = arm_is_secure(env); 2988 2989 if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) { 2990 return env->cp15.vmpidr_el2; 2991 } 2992 return mpidr_read_val(env); 2993 } 2994 2995 static const ARMCPRegInfo mpidr_cp_reginfo[] = { 2996 { .name = "MPIDR", .state = ARM_CP_STATE_BOTH, 2997 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5, 2998 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW }, 2999 REGINFO_SENTINEL 3000 }; 3001 3002 static const ARMCPRegInfo lpae_cp_reginfo[] = { 3003 /* NOP AMAIR0/1 */ 3004 { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH, 3005 .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0, 3006 .access = PL1_RW, .type = ARM_CP_CONST, 3007 .resetvalue = 0 }, 3008 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */ 3009 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1, 3010 .access = PL1_RW, .type = ARM_CP_CONST, 3011 .resetvalue = 0 }, 3012 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0, 3013 .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0, 3014 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s), 3015 offsetof(CPUARMState, cp15.par_ns)} }, 3016 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0, 3017 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 3018 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 3019 offsetof(CPUARMState, cp15.ttbr0_ns) }, 3020 .writefn = vmsa_ttbr_write, }, 3021 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1, 3022 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 3023 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 3024 offsetof(CPUARMState, cp15.ttbr1_ns) }, 3025 .writefn = vmsa_ttbr_write, }, 3026 REGINFO_SENTINEL 3027 }; 3028 3029 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri) 3030 { 3031 return vfp_get_fpcr(env); 3032 } 3033 3034 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3035 uint64_t value) 3036 { 3037 vfp_set_fpcr(env, value); 3038 } 3039 3040 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri) 3041 { 3042 return vfp_get_fpsr(env); 3043 } 3044 3045 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3046 uint64_t value) 3047 { 3048 vfp_set_fpsr(env, value); 3049 } 3050 3051 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri, 3052 bool isread) 3053 { 3054 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) { 3055 return CP_ACCESS_TRAP; 3056 } 3057 return CP_ACCESS_OK; 3058 } 3059 3060 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri, 3061 uint64_t value) 3062 { 3063 env->daif = value & PSTATE_DAIF; 3064 } 3065 3066 static CPAccessResult aa64_cacheop_access(CPUARMState *env, 3067 const ARMCPRegInfo *ri, 3068 bool isread) 3069 { 3070 /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless 3071 * SCTLR_EL1.UCI is set. 
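 * (Only the ops accessible from EL0 -- IC IVAU and the DC clean/invalidate
 * by VA ops -- use this accessfn; the PL1-only cache ops are plain NOPs.)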
3072 */ 3073 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) { 3074 return CP_ACCESS_TRAP; 3075 } 3076 return CP_ACCESS_OK; 3077 } 3078 3079 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions 3080 * Page D4-1736 (DDI0487A.b) 3081 */ 3082 3083 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3084 uint64_t value) 3085 { 3086 CPUState *cs = ENV_GET_CPU(env); 3087 3088 if (arm_is_secure_below_el3(env)) { 3089 tlb_flush_by_mmuidx(cs, 3090 ARMMMUIdxBit_S1SE1 | 3091 ARMMMUIdxBit_S1SE0); 3092 } else { 3093 tlb_flush_by_mmuidx(cs, 3094 ARMMMUIdxBit_S12NSE1 | 3095 ARMMMUIdxBit_S12NSE0); 3096 } 3097 } 3098 3099 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3100 uint64_t value) 3101 { 3102 CPUState *cs = ENV_GET_CPU(env); 3103 bool sec = arm_is_secure_below_el3(env); 3104 3105 if (sec) { 3106 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3107 ARMMMUIdxBit_S1SE1 | 3108 ARMMMUIdxBit_S1SE0); 3109 } else { 3110 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3111 ARMMMUIdxBit_S12NSE1 | 3112 ARMMMUIdxBit_S12NSE0); 3113 } 3114 } 3115 3116 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3117 uint64_t value) 3118 { 3119 /* Note that the 'ALL' scope must invalidate both stage 1 and 3120 * stage 2 translations, whereas most other scopes only invalidate 3121 * stage 1 translations. 3122 */ 3123 ARMCPU *cpu = arm_env_get_cpu(env); 3124 CPUState *cs = CPU(cpu); 3125 3126 if (arm_is_secure_below_el3(env)) { 3127 tlb_flush_by_mmuidx(cs, 3128 ARMMMUIdxBit_S1SE1 | 3129 ARMMMUIdxBit_S1SE0); 3130 } else { 3131 if (arm_feature(env, ARM_FEATURE_EL2)) { 3132 tlb_flush_by_mmuidx(cs, 3133 ARMMMUIdxBit_S12NSE1 | 3134 ARMMMUIdxBit_S12NSE0 | 3135 ARMMMUIdxBit_S2NS); 3136 } else { 3137 tlb_flush_by_mmuidx(cs, 3138 ARMMMUIdxBit_S12NSE1 | 3139 ARMMMUIdxBit_S12NSE0); 3140 } 3141 } 3142 } 3143 3144 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri, 3145 uint64_t value) 3146 { 3147 ARMCPU *cpu = arm_env_get_cpu(env); 3148 CPUState *cs = CPU(cpu); 3149 3150 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2); 3151 } 3152 3153 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, 3154 uint64_t value) 3155 { 3156 ARMCPU *cpu = arm_env_get_cpu(env); 3157 CPUState *cs = CPU(cpu); 3158 3159 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3); 3160 } 3161 3162 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3163 uint64_t value) 3164 { 3165 /* Note that the 'ALL' scope must invalidate both stage 1 and 3166 * stage 2 translations, whereas most other scopes only invalidate 3167 * stage 1 translations. 
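 * This is the Inner Shareable (broadcast) variant, so the flush is
 * synchronised across all CPUs.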
3168 */ 3169 CPUState *cs = ENV_GET_CPU(env); 3170 bool sec = arm_is_secure_below_el3(env); 3171 bool has_el2 = arm_feature(env, ARM_FEATURE_EL2); 3172 3173 if (sec) { 3174 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3175 ARMMMUIdxBit_S1SE1 | 3176 ARMMMUIdxBit_S1SE0); 3177 } else if (has_el2) { 3178 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3179 ARMMMUIdxBit_S12NSE1 | 3180 ARMMMUIdxBit_S12NSE0 | 3181 ARMMMUIdxBit_S2NS); 3182 } else { 3183 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3184 ARMMMUIdxBit_S12NSE1 | 3185 ARMMMUIdxBit_S12NSE0); 3186 } 3187 } 3188 3189 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3190 uint64_t value) 3191 { 3192 CPUState *cs = ENV_GET_CPU(env); 3193 3194 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2); 3195 } 3196 3197 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3198 uint64_t value) 3199 { 3200 CPUState *cs = ENV_GET_CPU(env); 3201 3202 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3); 3203 } 3204 3205 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3206 uint64_t value) 3207 { 3208 /* Invalidate by VA, EL1&0 (AArch64 version). 3209 * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1, 3210 * since we don't support flush-for-specific-ASID-only or 3211 * flush-last-level-only. 3212 */ 3213 ARMCPU *cpu = arm_env_get_cpu(env); 3214 CPUState *cs = CPU(cpu); 3215 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3216 3217 if (arm_is_secure_below_el3(env)) { 3218 tlb_flush_page_by_mmuidx(cs, pageaddr, 3219 ARMMMUIdxBit_S1SE1 | 3220 ARMMMUIdxBit_S1SE0); 3221 } else { 3222 tlb_flush_page_by_mmuidx(cs, pageaddr, 3223 ARMMMUIdxBit_S12NSE1 | 3224 ARMMMUIdxBit_S12NSE0); 3225 } 3226 } 3227 3228 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, 3229 uint64_t value) 3230 { 3231 /* Invalidate by VA, EL2 3232 * Currently handles both VAE2 and VALE2, since we don't support 3233 * flush-last-level-only. 3234 */ 3235 ARMCPU *cpu = arm_env_get_cpu(env); 3236 CPUState *cs = CPU(cpu); 3237 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3238 3239 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2); 3240 } 3241 3242 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri, 3243 uint64_t value) 3244 { 3245 /* Invalidate by VA, EL3 3246 * Currently handles both VAE3 and VALE3, since we don't support 3247 * flush-last-level-only. 
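 * The register value carries the VA in bits [43:0]; below it is shifted up
 * to a byte address and the 56-bit result sign-extended, as in the other
 * TLBI-by-VA helpers.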
3248 */ 3249 ARMCPU *cpu = arm_env_get_cpu(env); 3250 CPUState *cs = CPU(cpu); 3251 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3252 3253 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3); 3254 } 3255 3256 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3257 uint64_t value) 3258 { 3259 ARMCPU *cpu = arm_env_get_cpu(env); 3260 CPUState *cs = CPU(cpu); 3261 bool sec = arm_is_secure_below_el3(env); 3262 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3263 3264 if (sec) { 3265 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3266 ARMMMUIdxBit_S1SE1 | 3267 ARMMMUIdxBit_S1SE0); 3268 } else { 3269 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3270 ARMMMUIdxBit_S12NSE1 | 3271 ARMMMUIdxBit_S12NSE0); 3272 } 3273 } 3274 3275 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3276 uint64_t value) 3277 { 3278 CPUState *cs = ENV_GET_CPU(env); 3279 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3280 3281 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3282 ARMMMUIdxBit_S1E2); 3283 } 3284 3285 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3286 uint64_t value) 3287 { 3288 CPUState *cs = ENV_GET_CPU(env); 3289 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3290 3291 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3292 ARMMMUIdxBit_S1E3); 3293 } 3294 3295 static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3296 uint64_t value) 3297 { 3298 /* Invalidate by IPA. This has to invalidate any structures that 3299 * contain only stage 2 translation information, but does not need 3300 * to apply to structures that contain combined stage 1 and stage 2 3301 * translation information. 3302 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero. 3303 */ 3304 ARMCPU *cpu = arm_env_get_cpu(env); 3305 CPUState *cs = CPU(cpu); 3306 uint64_t pageaddr; 3307 3308 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 3309 return; 3310 } 3311 3312 pageaddr = sextract64(value << 12, 0, 48); 3313 3314 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS); 3315 } 3316 3317 static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3318 uint64_t value) 3319 { 3320 CPUState *cs = ENV_GET_CPU(env); 3321 uint64_t pageaddr; 3322 3323 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 3324 return; 3325 } 3326 3327 pageaddr = sextract64(value << 12, 0, 48); 3328 3329 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3330 ARMMMUIdxBit_S2NS); 3331 } 3332 3333 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri, 3334 bool isread) 3335 { 3336 /* We don't implement EL2, so the only control on DC ZVA is the 3337 * bit in the SCTLR which can prohibit access for EL0. 
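 * (That control is SCTLR_EL1.DZE: when it is clear, EL0 accesses to
 * DC ZVA trap.)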
3338 */ 3339 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) { 3340 return CP_ACCESS_TRAP; 3341 } 3342 return CP_ACCESS_OK; 3343 } 3344 3345 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri) 3346 { 3347 ARMCPU *cpu = arm_env_get_cpu(env); 3348 int dzp_bit = 1 << 4; 3349 3350 /* DZP indicates whether DC ZVA access is allowed */ 3351 if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) { 3352 dzp_bit = 0; 3353 } 3354 return cpu->dcz_blocksize | dzp_bit; 3355 } 3356 3357 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, 3358 bool isread) 3359 { 3360 if (!(env->pstate & PSTATE_SP)) { 3361 /* Access to SP_EL0 is undefined if it's being used as 3362 * the stack pointer. 3363 */ 3364 return CP_ACCESS_TRAP_UNCATEGORIZED; 3365 } 3366 return CP_ACCESS_OK; 3367 } 3368 3369 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri) 3370 { 3371 return env->pstate & PSTATE_SP; 3372 } 3373 3374 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val) 3375 { 3376 update_spsel(env, val); 3377 } 3378 3379 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3380 uint64_t value) 3381 { 3382 ARMCPU *cpu = arm_env_get_cpu(env); 3383 3384 if (raw_read(env, ri) == value) { 3385 /* Skip the TLB flush if nothing actually changed; Linux likes 3386 * to do a lot of pointless SCTLR writes. 3387 */ 3388 return; 3389 } 3390 3391 if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) { 3392 /* M bit is RAZ/WI for PMSA with no MPU implemented */ 3393 value &= ~SCTLR_M; 3394 } 3395 3396 raw_write(env, ri, value); 3397 /* ??? Lots of these bits are not implemented. */ 3398 /* This may enable/disable the MMU, so do a TLB flush. */ 3399 tlb_flush(CPU(cpu)); 3400 } 3401 3402 static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri, 3403 bool isread) 3404 { 3405 if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) { 3406 return CP_ACCESS_TRAP_FP_EL2; 3407 } 3408 if (env->cp15.cptr_el[3] & CPTR_TFP) { 3409 return CP_ACCESS_TRAP_FP_EL3; 3410 } 3411 return CP_ACCESS_OK; 3412 } 3413 3414 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3415 uint64_t value) 3416 { 3417 env->cp15.mdcr_el3 = value & SDCR_VALID_MASK; 3418 } 3419 3420 static const ARMCPRegInfo v8_cp_reginfo[] = { 3421 /* Minimal set of EL0-visible registers. This will need to be expanded 3422 * significantly for system emulation of AArch64 CPUs. 
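 *
 * The opc0/opc1/crn/crm/opc2 fields of each entry mirror the operand of
 * the corresponding MRS/MSR instruction; for instance the NZCV entry that
 * follows is op0=3, op1=3, CRn=4, CRm=2, op2=0, i.e. the register named by
 * "mrs x0, nzcv".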
3423 */ 3424 { .name = "NZCV", .state = ARM_CP_STATE_AA64, 3425 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2, 3426 .access = PL0_RW, .type = ARM_CP_NZCV }, 3427 { .name = "DAIF", .state = ARM_CP_STATE_AA64, 3428 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2, 3429 .type = ARM_CP_NO_RAW, 3430 .access = PL0_RW, .accessfn = aa64_daif_access, 3431 .fieldoffset = offsetof(CPUARMState, daif), 3432 .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore }, 3433 { .name = "FPCR", .state = ARM_CP_STATE_AA64, 3434 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4, 3435 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, 3436 .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write }, 3437 { .name = "FPSR", .state = ARM_CP_STATE_AA64, 3438 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4, 3439 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, 3440 .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write }, 3441 { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64, 3442 .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0, 3443 .access = PL0_R, .type = ARM_CP_NO_RAW, 3444 .readfn = aa64_dczid_read }, 3445 { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64, 3446 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1, 3447 .access = PL0_W, .type = ARM_CP_DC_ZVA, 3448 #ifndef CONFIG_USER_ONLY 3449 /* Avoid overhead of an access check that always passes in user-mode */ 3450 .accessfn = aa64_zva_access, 3451 #endif 3452 }, 3453 { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64, 3454 .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2, 3455 .access = PL1_R, .type = ARM_CP_CURRENTEL }, 3456 /* Cache ops: all NOPs since we don't emulate caches */ 3457 { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64, 3458 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 3459 .access = PL1_W, .type = ARM_CP_NOP }, 3460 { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64, 3461 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 3462 .access = PL1_W, .type = ARM_CP_NOP }, 3463 { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64, 3464 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1, 3465 .access = PL0_W, .type = ARM_CP_NOP, 3466 .accessfn = aa64_cacheop_access }, 3467 { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64, 3468 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 3469 .access = PL1_W, .type = ARM_CP_NOP }, 3470 { .name = "DC_ISW", .state = ARM_CP_STATE_AA64, 3471 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 3472 .access = PL1_W, .type = ARM_CP_NOP }, 3473 { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64, 3474 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1, 3475 .access = PL0_W, .type = ARM_CP_NOP, 3476 .accessfn = aa64_cacheop_access }, 3477 { .name = "DC_CSW", .state = ARM_CP_STATE_AA64, 3478 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, 3479 .access = PL1_W, .type = ARM_CP_NOP }, 3480 { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64, 3481 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1, 3482 .access = PL0_W, .type = ARM_CP_NOP, 3483 .accessfn = aa64_cacheop_access }, 3484 { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64, 3485 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1, 3486 .access = PL0_W, .type = ARM_CP_NOP, 3487 .accessfn = aa64_cacheop_access }, 3488 { .name = "DC_CISW", .state = ARM_CP_STATE_AA64, 3489 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 3490 .access = PL1_W, .type = ARM_CP_NOP }, 3491 /* TLBI operations */ 3492 { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64, 3493 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, 3494 
.access = PL1_W, .type = ARM_CP_NO_RAW, 3495 .writefn = tlbi_aa64_vmalle1is_write }, 3496 { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64, 3497 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, 3498 .access = PL1_W, .type = ARM_CP_NO_RAW, 3499 .writefn = tlbi_aa64_vae1is_write }, 3500 { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64, 3501 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, 3502 .access = PL1_W, .type = ARM_CP_NO_RAW, 3503 .writefn = tlbi_aa64_vmalle1is_write }, 3504 { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64, 3505 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, 3506 .access = PL1_W, .type = ARM_CP_NO_RAW, 3507 .writefn = tlbi_aa64_vae1is_write }, 3508 { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64, 3509 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, 3510 .access = PL1_W, .type = ARM_CP_NO_RAW, 3511 .writefn = tlbi_aa64_vae1is_write }, 3512 { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64, 3513 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, 3514 .access = PL1_W, .type = ARM_CP_NO_RAW, 3515 .writefn = tlbi_aa64_vae1is_write }, 3516 { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64, 3517 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, 3518 .access = PL1_W, .type = ARM_CP_NO_RAW, 3519 .writefn = tlbi_aa64_vmalle1_write }, 3520 { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64, 3521 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, 3522 .access = PL1_W, .type = ARM_CP_NO_RAW, 3523 .writefn = tlbi_aa64_vae1_write }, 3524 { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64, 3525 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, 3526 .access = PL1_W, .type = ARM_CP_NO_RAW, 3527 .writefn = tlbi_aa64_vmalle1_write }, 3528 { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64, 3529 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, 3530 .access = PL1_W, .type = ARM_CP_NO_RAW, 3531 .writefn = tlbi_aa64_vae1_write }, 3532 { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64, 3533 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, 3534 .access = PL1_W, .type = ARM_CP_NO_RAW, 3535 .writefn = tlbi_aa64_vae1_write }, 3536 { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64, 3537 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, 3538 .access = PL1_W, .type = ARM_CP_NO_RAW, 3539 .writefn = tlbi_aa64_vae1_write }, 3540 { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64, 3541 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, 3542 .access = PL2_W, .type = ARM_CP_NO_RAW, 3543 .writefn = tlbi_aa64_ipas2e1is_write }, 3544 { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64, 3545 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, 3546 .access = PL2_W, .type = ARM_CP_NO_RAW, 3547 .writefn = tlbi_aa64_ipas2e1is_write }, 3548 { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64, 3549 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, 3550 .access = PL2_W, .type = ARM_CP_NO_RAW, 3551 .writefn = tlbi_aa64_alle1is_write }, 3552 { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64, 3553 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6, 3554 .access = PL2_W, .type = ARM_CP_NO_RAW, 3555 .writefn = tlbi_aa64_alle1is_write }, 3556 { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64, 3557 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, 3558 .access = PL2_W, .type = ARM_CP_NO_RAW, 3559 .writefn = tlbi_aa64_ipas2e1_write }, 3560 { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64, 3561 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, 3562 .access = PL2_W, .type = ARM_CP_NO_RAW, 3563 .writefn = 
tlbi_aa64_ipas2e1_write }, 3564 { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64, 3565 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, 3566 .access = PL2_W, .type = ARM_CP_NO_RAW, 3567 .writefn = tlbi_aa64_alle1_write }, 3568 { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64, 3569 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6, 3570 .access = PL2_W, .type = ARM_CP_NO_RAW, 3571 .writefn = tlbi_aa64_alle1is_write }, 3572 #ifndef CONFIG_USER_ONLY 3573 /* 64 bit address translation operations */ 3574 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, 3575 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0, 3576 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3577 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, 3578 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1, 3579 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3580 { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64, 3581 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2, 3582 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3583 { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64, 3584 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3, 3585 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3586 { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64, 3587 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4, 3588 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3589 { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64, 3590 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5, 3591 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3592 { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64, 3593 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6, 3594 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3595 { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64, 3596 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7, 3597 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3598 /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */ 3599 { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64, 3600 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0, 3601 .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3602 { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64, 3603 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1, 3604 .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3605 { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64, 3606 .type = ARM_CP_ALIAS, 3607 .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0, 3608 .access = PL1_RW, .resetvalue = 0, 3609 .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]), 3610 .writefn = par_write }, 3611 #endif 3612 /* TLB invalidate last level of translation table walk */ 3613 { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, 3614 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write }, 3615 { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, 3616 .type = ARM_CP_NO_RAW, .access = PL1_W, 3617 .writefn = tlbimvaa_is_write }, 3618 { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, 3619 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 3620 { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, 3621 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write }, 3622 { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, 3623 .type = ARM_CP_NO_RAW, .access = PL2_W, 3624 .writefn = 
tlbimva_hyp_write }, 3625 { .name = "TLBIMVALHIS", 3626 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, 3627 .type = ARM_CP_NO_RAW, .access = PL2_W, 3628 .writefn = tlbimva_hyp_is_write }, 3629 { .name = "TLBIIPAS2", 3630 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, 3631 .type = ARM_CP_NO_RAW, .access = PL2_W, 3632 .writefn = tlbiipas2_write }, 3633 { .name = "TLBIIPAS2IS", 3634 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, 3635 .type = ARM_CP_NO_RAW, .access = PL2_W, 3636 .writefn = tlbiipas2_is_write }, 3637 { .name = "TLBIIPAS2L", 3638 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, 3639 .type = ARM_CP_NO_RAW, .access = PL2_W, 3640 .writefn = tlbiipas2_write }, 3641 { .name = "TLBIIPAS2LIS", 3642 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, 3643 .type = ARM_CP_NO_RAW, .access = PL2_W, 3644 .writefn = tlbiipas2_is_write }, 3645 /* 32 bit cache operations */ 3646 { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 3647 .type = ARM_CP_NOP, .access = PL1_W }, 3648 { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6, 3649 .type = ARM_CP_NOP, .access = PL1_W }, 3650 { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 3651 .type = ARM_CP_NOP, .access = PL1_W }, 3652 { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1, 3653 .type = ARM_CP_NOP, .access = PL1_W }, 3654 { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6, 3655 .type = ARM_CP_NOP, .access = PL1_W }, 3656 { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7, 3657 .type = ARM_CP_NOP, .access = PL1_W }, 3658 { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 3659 .type = ARM_CP_NOP, .access = PL1_W }, 3660 { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 3661 .type = ARM_CP_NOP, .access = PL1_W }, 3662 { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1, 3663 .type = ARM_CP_NOP, .access = PL1_W }, 3664 { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, 3665 .type = ARM_CP_NOP, .access = PL1_W }, 3666 { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1, 3667 .type = ARM_CP_NOP, .access = PL1_W }, 3668 { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1, 3669 .type = ARM_CP_NOP, .access = PL1_W }, 3670 { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 3671 .type = ARM_CP_NOP, .access = PL1_W }, 3672 /* MMU Domain access control / MPU write buffer control */ 3673 { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0, 3674 .access = PL1_RW, .resetvalue = 0, 3675 .writefn = dacr_write, .raw_writefn = raw_write, 3676 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), 3677 offsetoflow32(CPUARMState, cp15.dacr_ns) } }, 3678 { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64, 3679 .type = ARM_CP_ALIAS, 3680 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1, 3681 .access = PL1_RW, 3682 .fieldoffset = offsetof(CPUARMState, elr_el[1]) }, 3683 { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64, 3684 .type = ARM_CP_ALIAS, 3685 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0, 3686 .access = PL1_RW, 3687 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) }, 3688 /* We rely on the access checks not allowing the guest to write to the 3689 * state field when SPSel indicates that it's being used as the stack 3690 * pointer. 
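 *
 * Concretely, sp_el0_access() above makes an MRS/MSR of SP_EL0 UNDEF
 * (CP_ACCESS_TRAP_UNCATEGORIZED) while PSTATE.SP is 0, i.e. while SP_EL0
 * is the stack pointer currently in use; with SPSel set to 1 the same
 * access simply reads or writes env->sp_el[0].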
3691 */ 3692 { .name = "SP_EL0", .state = ARM_CP_STATE_AA64, 3693 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0, 3694 .access = PL1_RW, .accessfn = sp_el0_access, 3695 .type = ARM_CP_ALIAS, 3696 .fieldoffset = offsetof(CPUARMState, sp_el[0]) }, 3697 { .name = "SP_EL1", .state = ARM_CP_STATE_AA64, 3698 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0, 3699 .access = PL2_RW, .type = ARM_CP_ALIAS, 3700 .fieldoffset = offsetof(CPUARMState, sp_el[1]) }, 3701 { .name = "SPSel", .state = ARM_CP_STATE_AA64, 3702 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0, 3703 .type = ARM_CP_NO_RAW, 3704 .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write }, 3705 { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64, 3706 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0, 3707 .type = ARM_CP_ALIAS, 3708 .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]), 3709 .access = PL2_RW, .accessfn = fpexc32_access }, 3710 { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64, 3711 .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0, 3712 .access = PL2_RW, .resetvalue = 0, 3713 .writefn = dacr_write, .raw_writefn = raw_write, 3714 .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) }, 3715 { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64, 3716 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1, 3717 .access = PL2_RW, .resetvalue = 0, 3718 .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) }, 3719 { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64, 3720 .type = ARM_CP_ALIAS, 3721 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0, 3722 .access = PL2_RW, 3723 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) }, 3724 { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64, 3725 .type = ARM_CP_ALIAS, 3726 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1, 3727 .access = PL2_RW, 3728 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) }, 3729 { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64, 3730 .type = ARM_CP_ALIAS, 3731 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2, 3732 .access = PL2_RW, 3733 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) }, 3734 { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64, 3735 .type = ARM_CP_ALIAS, 3736 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3, 3737 .access = PL2_RW, 3738 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) }, 3739 { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64, 3740 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1, 3741 .resetvalue = 0, 3742 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) }, 3743 { .name = "SDCR", .type = ARM_CP_ALIAS, 3744 .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1, 3745 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 3746 .writefn = sdcr_write, 3747 .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) }, 3748 REGINFO_SENTINEL 3749 }; 3750 3751 /* Used to describe the behaviour of EL2 regs when EL2 does not exist. 
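 *
 * Two styles of dummy are used in this list: registers whose writes would
 * normally have side effects (e.g. VBAR_EL2, HCR_EL2) get
 * arm_cp_read_zero/arm_cp_write_ignore accessors, and the remainder are
 * ARM_CP_CONST with resetvalue 0.  Either way, from EL3 on such a CPU:
 *
 *     mrs x0, hcr_el2      // reads as 0
 *     msr hcr_el2, x1      // write silently ignored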
*/ 3752 static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = { 3753 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, 3754 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, 3755 .access = PL2_RW, 3756 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore }, 3757 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64, 3758 .type = ARM_CP_NO_RAW, 3759 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 3760 .access = PL2_RW, 3761 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore }, 3762 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, 3763 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, 3764 .access = PL2_RW, 3765 .type = ARM_CP_CONST, .resetvalue = 0 }, 3766 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, 3767 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, 3768 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3769 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, 3770 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, 3771 .access = PL2_RW, .type = ARM_CP_CONST, 3772 .resetvalue = 0 }, 3773 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, 3774 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, 3775 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3776 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, 3777 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, 3778 .access = PL2_RW, .type = ARM_CP_CONST, 3779 .resetvalue = 0 }, 3780 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, 3781 .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, 3782 .access = PL2_RW, .type = ARM_CP_CONST, 3783 .resetvalue = 0 }, 3784 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, 3785 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, 3786 .access = PL2_RW, .type = ARM_CP_CONST, 3787 .resetvalue = 0 }, 3788 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, 3789 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, 3790 .access = PL2_RW, .type = ARM_CP_CONST, 3791 .resetvalue = 0 }, 3792 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, 3793 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, 3794 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3795 { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH, 3796 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 3797 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 3798 .type = ARM_CP_CONST, .resetvalue = 0 }, 3799 { .name = "VTTBR", .state = ARM_CP_STATE_AA32, 3800 .cp = 15, .opc1 = 6, .crm = 2, 3801 .access = PL2_RW, .accessfn = access_el3_aa32ns, 3802 .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, 3803 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, 3804 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, 3805 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3806 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, 3807 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, 3808 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3809 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, 3810 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, 3811 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3812 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, 3813 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 3814 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3815 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, 3816 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 3817 .resetvalue = 0 }, 3818 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, 3819 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, 3820 .access = PL2_RW, .type = 
ARM_CP_CONST, .resetvalue = 0 }, 3821 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, 3822 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, 3823 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3824 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, 3825 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 3826 .resetvalue = 0 }, 3827 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, 3828 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, 3829 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3830 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, 3831 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 3832 .resetvalue = 0 }, 3833 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 3834 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, 3835 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3836 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, 3837 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, 3838 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3839 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, 3840 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, 3841 .access = PL2_RW, .accessfn = access_tda, 3842 .type = ARM_CP_CONST, .resetvalue = 0 }, 3843 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH, 3844 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 3845 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 3846 .type = ARM_CP_CONST, .resetvalue = 0 }, 3847 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, 3848 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, 3849 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3850 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, 3851 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, 3852 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3853 { .name = "HIFAR", .state = ARM_CP_STATE_AA32, 3854 .type = ARM_CP_CONST, 3855 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, 3856 .access = PL2_RW, .resetvalue = 0 }, 3857 REGINFO_SENTINEL 3858 }; 3859 3860 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 3861 { 3862 ARMCPU *cpu = arm_env_get_cpu(env); 3863 uint64_t valid_mask = HCR_MASK; 3864 3865 if (arm_feature(env, ARM_FEATURE_EL3)) { 3866 valid_mask &= ~HCR_HCD; 3867 } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { 3868 /* Architecturally HCR.TSC is RES0 if EL3 is not implemented. 3869 * However, if we're using the SMC PSCI conduit then QEMU is 3870 * effectively acting like EL3 firmware and so the guest at 3871 * EL2 should retain the ability to prevent EL1 from being 3872 * able to make SMC calls into the ersatz firmware, so in 3873 * that case HCR.TSC should be read/write. 3874 */ 3875 valid_mask &= ~HCR_TSC; 3876 } 3877 3878 /* Clear RES0 bits. 
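 *
 * As an example of the masking: on a CPU with EL3 implemented, HCR_EL2.HCD
 * (bit 29) is RES0, so it was dropped from valid_mask above and a guest
 * write with that bit set reads back with it clear.  HCR_EL2.TSC (bit 19)
 * stays writable only while EL3 is implemented or while QEMU's SMC PSCI
 * conduit is standing in for EL3 firmware; otherwise it is masked to 0 as
 * well.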
*/ 3879 value &= valid_mask; 3880 3881 /* These bits change the MMU setup: 3882 * HCR_VM enables stage 2 translation 3883 * HCR_PTW forbids certain page-table setups 3884 * HCR_DC Disables stage1 and enables stage2 translation 3885 */ 3886 if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) { 3887 tlb_flush(CPU(cpu)); 3888 } 3889 raw_write(env, ri, value); 3890 } 3891 3892 static const ARMCPRegInfo el2_cp_reginfo[] = { 3893 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64, 3894 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 3895 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), 3896 .writefn = hcr_write }, 3897 { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64, 3898 .type = ARM_CP_ALIAS, 3899 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1, 3900 .access = PL2_RW, 3901 .fieldoffset = offsetof(CPUARMState, elr_el[2]) }, 3902 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, 3903 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, 3904 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) }, 3905 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, 3906 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, 3907 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) }, 3908 { .name = "HIFAR", .state = ARM_CP_STATE_AA32, 3909 .type = ARM_CP_ALIAS, 3910 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, 3911 .access = PL2_RW, 3912 .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) }, 3913 { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64, 3914 .type = ARM_CP_ALIAS, 3915 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0, 3916 .access = PL2_RW, 3917 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) }, 3918 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, 3919 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, 3920 .access = PL2_RW, .writefn = vbar_write, 3921 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]), 3922 .resetvalue = 0 }, 3923 { .name = "SP_EL2", .state = ARM_CP_STATE_AA64, 3924 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0, 3925 .access = PL3_RW, .type = ARM_CP_ALIAS, 3926 .fieldoffset = offsetof(CPUARMState, sp_el[2]) }, 3927 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, 3928 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, 3929 .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0, 3930 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) }, 3931 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, 3932 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, 3933 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]), 3934 .resetvalue = 0 }, 3935 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, 3936 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, 3937 .access = PL2_RW, .type = ARM_CP_ALIAS, 3938 .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) }, 3939 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, 3940 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, 3941 .access = PL2_RW, .type = ARM_CP_CONST, 3942 .resetvalue = 0 }, 3943 /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */ 3944 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, 3945 .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, 3946 .access = PL2_RW, .type = ARM_CP_CONST, 3947 .resetvalue = 0 }, 3948 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, 3949 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, 3950 .access = PL2_RW, .type = ARM_CP_CONST, 3951 .resetvalue = 0 }, 3952 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, 3953 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, 
3954 .access = PL2_RW, .type = ARM_CP_CONST, 3955 .resetvalue = 0 }, 3956 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, 3957 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, 3958 .access = PL2_RW, 3959 /* no .writefn needed as this can't cause an ASID change; 3960 * no .raw_writefn or .resetfn needed as we never use mask/base_mask 3961 */ 3962 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) }, 3963 { .name = "VTCR", .state = ARM_CP_STATE_AA32, 3964 .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 3965 .type = ARM_CP_ALIAS, 3966 .access = PL2_RW, .accessfn = access_el3_aa32ns, 3967 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 3968 { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64, 3969 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 3970 .access = PL2_RW, 3971 /* no .writefn needed as this can't cause an ASID change; 3972 * no .raw_writefn or .resetfn needed as we never use mask/base_mask 3973 */ 3974 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 3975 { .name = "VTTBR", .state = ARM_CP_STATE_AA32, 3976 .cp = 15, .opc1 = 6, .crm = 2, 3977 .type = ARM_CP_64BIT | ARM_CP_ALIAS, 3978 .access = PL2_RW, .accessfn = access_el3_aa32ns, 3979 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2), 3980 .writefn = vttbr_write }, 3981 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, 3982 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, 3983 .access = PL2_RW, .writefn = vttbr_write, 3984 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) }, 3985 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, 3986 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, 3987 .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write, 3988 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) }, 3989 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, 3990 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, 3991 .access = PL2_RW, .resetvalue = 0, 3992 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) }, 3993 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, 3994 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 3995 .access = PL2_RW, .resetvalue = 0, 3996 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 3997 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, 3998 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 3999 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 4000 { .name = "TLBIALLNSNH", 4001 .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, 4002 .type = ARM_CP_NO_RAW, .access = PL2_W, 4003 .writefn = tlbiall_nsnh_write }, 4004 { .name = "TLBIALLNSNHIS", 4005 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, 4006 .type = ARM_CP_NO_RAW, .access = PL2_W, 4007 .writefn = tlbiall_nsnh_is_write }, 4008 { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, 4009 .type = ARM_CP_NO_RAW, .access = PL2_W, 4010 .writefn = tlbiall_hyp_write }, 4011 { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, 4012 .type = ARM_CP_NO_RAW, .access = PL2_W, 4013 .writefn = tlbiall_hyp_is_write }, 4014 { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, 4015 .type = ARM_CP_NO_RAW, .access = PL2_W, 4016 .writefn = tlbimva_hyp_write }, 4017 { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, 4018 .type = ARM_CP_NO_RAW, .access = PL2_W, 4019 .writefn = tlbimva_hyp_is_write }, 4020 { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64, 4021 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, 4022 .type = ARM_CP_NO_RAW, .access = PL2_W, 4023 .writefn = tlbi_aa64_alle2_write 
}, 4024 { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64, 4025 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, 4026 .type = ARM_CP_NO_RAW, .access = PL2_W, 4027 .writefn = tlbi_aa64_vae2_write }, 4028 { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64, 4029 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, 4030 .access = PL2_W, .type = ARM_CP_NO_RAW, 4031 .writefn = tlbi_aa64_vae2_write }, 4032 { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64, 4033 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, 4034 .access = PL2_W, .type = ARM_CP_NO_RAW, 4035 .writefn = tlbi_aa64_alle2is_write }, 4036 { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64, 4037 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, 4038 .type = ARM_CP_NO_RAW, .access = PL2_W, 4039 .writefn = tlbi_aa64_vae2is_write }, 4040 { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64, 4041 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, 4042 .access = PL2_W, .type = ARM_CP_NO_RAW, 4043 .writefn = tlbi_aa64_vae2is_write }, 4044 #ifndef CONFIG_USER_ONLY 4045 /* Unlike the other EL2-related AT operations, these must 4046 * UNDEF from EL3 if EL2 is not implemented, which is why we 4047 * define them here rather than with the rest of the AT ops. 4048 */ 4049 { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64, 4050 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 4051 .access = PL2_W, .accessfn = at_s1e2_access, 4052 .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4053 { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64, 4054 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 4055 .access = PL2_W, .accessfn = at_s1e2_access, 4056 .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4057 /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE 4058 * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3 4059 * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose 4060 * to behave as if SCR.NS was 1. 4061 */ 4062 { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 4063 .access = PL2_W, 4064 .writefn = ats1h_write, .type = ARM_CP_NO_RAW }, 4065 { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 4066 .access = PL2_W, 4067 .writefn = ats1h_write, .type = ARM_CP_NO_RAW }, 4068 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, 4069 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, 4070 /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the 4071 * reset values as IMPDEF. We choose to reset to 3 to comply with 4072 * both ARMv7 and ARMv8. 
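 *
 * Bits 0 and 1 are EL1PCTEN and EL1PCEN, so a resetvalue of 3
 * ((1 << 1) | (1 << 0)) leaves EL1/EL0 accesses to the physical counter
 * and physical timer untrapped out of reset.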
4073 */
4074 .access = PL2_RW, .resetvalue = 3,
4075 .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
4076 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
4077 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
4078 .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
4079 .writefn = gt_cntvoff_write,
4080 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
4081 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
4082 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
4083 .writefn = gt_cntvoff_write,
4084 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
4085 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
4086 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
4087 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
4088 .type = ARM_CP_IO, .access = PL2_RW,
4089 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
4090 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
4091 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
4092 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
4093 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
4094 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
4095 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
4096 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
4097 .resetfn = gt_hyp_timer_reset,
4098 .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
4099 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
4100 .type = ARM_CP_IO,
4101 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
4102 .access = PL2_RW,
4103 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
4104 .resetvalue = 0,
4105 .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
4106 #endif
4107 /* The only field of MDCR_EL2 that has a defined architectural reset value
4108 * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
4109 * don't implement any PMU event counters, so using zero as a reset
4110 * value for MDCR_EL2 is okay.
4111 */
4112 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
4113 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
4114 .access = PL2_RW, .resetvalue = 0,
4115 .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
4116 { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
4117 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
4118 .access = PL2_RW, .accessfn = access_el3_aa32ns,
4119 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
4120 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
4121 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
4122 .access = PL2_RW,
4123 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
4124 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
4125 .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
4126 .access = PL2_RW,
4127 .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
4128 REGINFO_SENTINEL
4129 };
4130
4131 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
4132 bool isread)
4133 {
4134 /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
4135 * At Secure EL1 it traps to EL3.
4136 */
4137 if (arm_current_el(env) == 3) {
4138 return CP_ACCESS_OK;
4139 }
4140 if (arm_is_secure_below_el3(env)) {
4141 return CP_ACCESS_TRAP_EL3;
4142 }
4143 /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads.
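 *
 * Summing up this function's checks:
 *
 *     accessor               read            write
 *     EL3                    OK              OK
 *     Secure EL1             trap to EL3     trap to EL3
 *     Non-secure EL1/EL2     OK              UNDEF (uncategorized)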
*/ 4144 if (isread) { 4145 return CP_ACCESS_OK; 4146 } 4147 return CP_ACCESS_TRAP_UNCATEGORIZED; 4148 } 4149 4150 static const ARMCPRegInfo el3_cp_reginfo[] = { 4151 { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64, 4152 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0, 4153 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3), 4154 .resetvalue = 0, .writefn = scr_write }, 4155 { .name = "SCR", .type = ARM_CP_ALIAS, 4156 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0, 4157 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 4158 .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3), 4159 .writefn = scr_write }, 4160 { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64, 4161 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1, 4162 .access = PL3_RW, .resetvalue = 0, 4163 .fieldoffset = offsetof(CPUARMState, cp15.sder) }, 4164 { .name = "SDER", 4165 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1, 4166 .access = PL3_RW, .resetvalue = 0, 4167 .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) }, 4168 { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 4169 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 4170 .writefn = vbar_write, .resetvalue = 0, 4171 .fieldoffset = offsetof(CPUARMState, cp15.mvbar) }, 4172 { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64, 4173 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0, 4174 .access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0, 4175 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) }, 4176 { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64, 4177 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2, 4178 .access = PL3_RW, 4179 /* no .writefn needed as this can't cause an ASID change; 4180 * we must provide a .raw_writefn and .resetfn because we handle 4181 * reset and migration for the AArch32 TTBCR(S), which might be 4182 * using mask and base_mask. 
4183 */ 4184 .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write, 4185 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) }, 4186 { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64, 4187 .type = ARM_CP_ALIAS, 4188 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1, 4189 .access = PL3_RW, 4190 .fieldoffset = offsetof(CPUARMState, elr_el[3]) }, 4191 { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64, 4192 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0, 4193 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) }, 4194 { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64, 4195 .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0, 4196 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) }, 4197 { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64, 4198 .type = ARM_CP_ALIAS, 4199 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0, 4200 .access = PL3_RW, 4201 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) }, 4202 { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64, 4203 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0, 4204 .access = PL3_RW, .writefn = vbar_write, 4205 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]), 4206 .resetvalue = 0 }, 4207 { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64, 4208 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2, 4209 .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0, 4210 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) }, 4211 { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64, 4212 .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2, 4213 .access = PL3_RW, .resetvalue = 0, 4214 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) }, 4215 { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64, 4216 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0, 4217 .access = PL3_RW, .type = ARM_CP_CONST, 4218 .resetvalue = 0 }, 4219 { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH, 4220 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0, 4221 .access = PL3_RW, .type = ARM_CP_CONST, 4222 .resetvalue = 0 }, 4223 { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH, 4224 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1, 4225 .access = PL3_RW, .type = ARM_CP_CONST, 4226 .resetvalue = 0 }, 4227 { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64, 4228 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0, 4229 .access = PL3_W, .type = ARM_CP_NO_RAW, 4230 .writefn = tlbi_aa64_alle3is_write }, 4231 { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64, 4232 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1, 4233 .access = PL3_W, .type = ARM_CP_NO_RAW, 4234 .writefn = tlbi_aa64_vae3is_write }, 4235 { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64, 4236 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5, 4237 .access = PL3_W, .type = ARM_CP_NO_RAW, 4238 .writefn = tlbi_aa64_vae3is_write }, 4239 { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64, 4240 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0, 4241 .access = PL3_W, .type = ARM_CP_NO_RAW, 4242 .writefn = tlbi_aa64_alle3_write }, 4243 { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64, 4244 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1, 4245 .access = PL3_W, .type = ARM_CP_NO_RAW, 4246 .writefn = tlbi_aa64_vae3_write }, 4247 { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64, 4248 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5, 4249 .access = PL3_W, .type = ARM_CP_NO_RAW, 4250 .writefn = tlbi_aa64_vae3_write }, 4251 REGINFO_SENTINEL 4252 }; 4253 4254 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, 
4255 bool isread) 4256 { 4257 /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64, 4258 * but the AArch32 CTR has its own reginfo struct) 4259 */ 4260 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) { 4261 return CP_ACCESS_TRAP; 4262 } 4263 return CP_ACCESS_OK; 4264 } 4265 4266 static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri, 4267 uint64_t value) 4268 { 4269 /* Writes to OSLAR_EL1 may update the OS lock status, which can be 4270 * read via a bit in OSLSR_EL1. 4271 */ 4272 int oslock; 4273 4274 if (ri->state == ARM_CP_STATE_AA32) { 4275 oslock = (value == 0xC5ACCE55); 4276 } else { 4277 oslock = value & 1; 4278 } 4279 4280 env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock); 4281 } 4282 4283 static const ARMCPRegInfo debug_cp_reginfo[] = { 4284 /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped 4285 * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1; 4286 * unlike DBGDRAR it is never accessible from EL0. 4287 * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64 4288 * accessor. 4289 */ 4290 { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0, 4291 .access = PL0_R, .accessfn = access_tdra, 4292 .type = ARM_CP_CONST, .resetvalue = 0 }, 4293 { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64, 4294 .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 4295 .access = PL1_R, .accessfn = access_tdra, 4296 .type = ARM_CP_CONST, .resetvalue = 0 }, 4297 { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 4298 .access = PL0_R, .accessfn = access_tdra, 4299 .type = ARM_CP_CONST, .resetvalue = 0 }, 4300 /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */ 4301 { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH, 4302 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 4303 .access = PL1_RW, .accessfn = access_tda, 4304 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), 4305 .resetvalue = 0 }, 4306 /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1. 4307 * We don't implement the configurable EL0 access. 4308 */ 4309 { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH, 4310 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 4311 .type = ARM_CP_ALIAS, 4312 .access = PL1_R, .accessfn = access_tda, 4313 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), }, 4314 { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH, 4315 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4, 4316 .access = PL1_W, .type = ARM_CP_NO_RAW, 4317 .accessfn = access_tdosa, 4318 .writefn = oslar_write }, 4319 { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH, 4320 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4, 4321 .access = PL1_R, .resetvalue = 10, 4322 .accessfn = access_tdosa, 4323 .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) }, 4324 /* Dummy OSDLR_EL1: 32-bit Linux will read this */ 4325 { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH, 4326 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4, 4327 .access = PL1_RW, .accessfn = access_tdosa, 4328 .type = ARM_CP_NOP }, 4329 /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't 4330 * implement vector catch debug events yet. 
4331 */ 4332 { .name = "DBGVCR", 4333 .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 4334 .access = PL1_RW, .accessfn = access_tda, 4335 .type = ARM_CP_NOP }, 4336 /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor 4337 * to save and restore a 32-bit guest's DBGVCR) 4338 */ 4339 { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, 4340 .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, 4341 .access = PL2_RW, .accessfn = access_tda, 4342 .type = ARM_CP_NOP }, 4343 /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications 4344 * Channel but Linux may try to access this register. The 32-bit 4345 * alias is DBGDCCINT. 4346 */ 4347 { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH, 4348 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 4349 .access = PL1_RW, .accessfn = access_tda, 4350 .type = ARM_CP_NOP }, 4351 REGINFO_SENTINEL 4352 }; 4353 4354 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { 4355 /* 64 bit access versions of the (dummy) debug registers */ 4356 { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0, 4357 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, 4358 { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0, 4359 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, 4360 REGINFO_SENTINEL 4361 }; 4362 4363 /* Return the exception level to which SVE-disabled exceptions should 4364 * be taken, or 0 if SVE is enabled. 4365 */ 4366 static int sve_exception_el(CPUARMState *env) 4367 { 4368 #ifndef CONFIG_USER_ONLY 4369 unsigned current_el = arm_current_el(env); 4370 4371 /* The CPACR.ZEN controls traps to EL1: 4372 * 0, 2 : trap EL0 and EL1 accesses 4373 * 1 : trap only EL0 accesses 4374 * 3 : trap no accesses 4375 */ 4376 switch (extract32(env->cp15.cpacr_el1, 16, 2)) { 4377 default: 4378 if (current_el <= 1) { 4379 /* Trap to PL1, which might be EL1 or EL3 */ 4380 if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { 4381 return 3; 4382 } 4383 return 1; 4384 } 4385 break; 4386 case 1: 4387 if (current_el == 0) { 4388 return 1; 4389 } 4390 break; 4391 case 3: 4392 break; 4393 } 4394 4395 /* Similarly for CPACR.FPEN, after having checked ZEN. */ 4396 switch (extract32(env->cp15.cpacr_el1, 20, 2)) { 4397 default: 4398 if (current_el <= 1) { 4399 if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { 4400 return 3; 4401 } 4402 return 1; 4403 } 4404 break; 4405 case 1: 4406 if (current_el == 0) { 4407 return 1; 4408 } 4409 break; 4410 case 3: 4411 break; 4412 } 4413 4414 /* CPTR_EL2. Check both TZ and TFP. */ 4415 if (current_el <= 2 4416 && (env->cp15.cptr_el[2] & (CPTR_TFP | CPTR_TZ)) 4417 && !arm_is_secure_below_el3(env)) { 4418 return 2; 4419 } 4420 4421 /* CPTR_EL3. Check both EZ and TFP. */ 4422 if (!(env->cp15.cptr_el[3] & CPTR_EZ) 4423 || (env->cp15.cptr_el[3] & CPTR_TFP)) { 4424 return 3; 4425 } 4426 #endif 4427 return 0; 4428 } 4429 4430 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4431 uint64_t value) 4432 { 4433 /* Bits other than [3:0] are RAZ/WI. 
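 *
 * Bits [3:0] are the LEN field, which requests an SVE vector length of
 * (LEN + 1) * 128 bits; so, for example, a guest write of 0x107 is stored
 * as 0x7, i.e. a request for 1024-bit vectors (subject to whatever maximum
 * the implementation actually supports).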
*/ 4434 raw_write(env, ri, value & 0xf); 4435 } 4436 4437 static const ARMCPRegInfo zcr_el1_reginfo = { 4438 .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64, 4439 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0, 4440 .access = PL1_RW, .type = ARM_CP_SVE, 4441 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]), 4442 .writefn = zcr_write, .raw_writefn = raw_write 4443 }; 4444 4445 static const ARMCPRegInfo zcr_el2_reginfo = { 4446 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, 4447 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, 4448 .access = PL2_RW, .type = ARM_CP_SVE, 4449 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]), 4450 .writefn = zcr_write, .raw_writefn = raw_write 4451 }; 4452 4453 static const ARMCPRegInfo zcr_no_el2_reginfo = { 4454 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, 4455 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, 4456 .access = PL2_RW, .type = ARM_CP_SVE, 4457 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore 4458 }; 4459 4460 static const ARMCPRegInfo zcr_el3_reginfo = { 4461 .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64, 4462 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0, 4463 .access = PL3_RW, .type = ARM_CP_SVE, 4464 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]), 4465 .writefn = zcr_write, .raw_writefn = raw_write 4466 }; 4467 4468 void hw_watchpoint_update(ARMCPU *cpu, int n) 4469 { 4470 CPUARMState *env = &cpu->env; 4471 vaddr len = 0; 4472 vaddr wvr = env->cp15.dbgwvr[n]; 4473 uint64_t wcr = env->cp15.dbgwcr[n]; 4474 int mask; 4475 int flags = BP_CPU | BP_STOP_BEFORE_ACCESS; 4476 4477 if (env->cpu_watchpoint[n]) { 4478 cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]); 4479 env->cpu_watchpoint[n] = NULL; 4480 } 4481 4482 if (!extract64(wcr, 0, 1)) { 4483 /* E bit clear : watchpoint disabled */ 4484 return; 4485 } 4486 4487 switch (extract64(wcr, 3, 2)) { 4488 case 0: 4489 /* LSC 00 is reserved and must behave as if the wp is disabled */ 4490 return; 4491 case 1: 4492 flags |= BP_MEM_READ; 4493 break; 4494 case 2: 4495 flags |= BP_MEM_WRITE; 4496 break; 4497 case 3: 4498 flags |= BP_MEM_ACCESS; 4499 break; 4500 } 4501 4502 /* Attempts to use both MASK and BAS fields simultaneously are 4503 * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case, 4504 * thus generating a watchpoint for every byte in the masked region. 4505 */ 4506 mask = extract64(wcr, 24, 4); 4507 if (mask == 1 || mask == 2) { 4508 /* Reserved values of MASK; we must act as if the mask value was 4509 * some non-reserved value, or as if the watchpoint were disabled. 4510 * We choose the latter. 4511 */ 4512 return; 4513 } else if (mask) { 4514 /* Watchpoint covers an aligned area up to 2GB in size */ 4515 len = 1ULL << mask; 4516 /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE 4517 * whether the watchpoint fires when the unmasked bits match; we opt 4518 * to generate the exceptions. 4519 */ 4520 wvr &= ~(len - 1); 4521 } else { 4522 /* Watchpoint covers bytes defined by the byte address select bits */ 4523 int bas = extract64(wcr, 5, 8); 4524 int basstart; 4525 4526 if (bas == 0) { 4527 /* This must act as if the watchpoint is disabled */ 4528 return; 4529 } 4530 4531 if (extract64(wvr, 2, 1)) { 4532 /* Deprecated case of an only 4-aligned address. BAS[7:4] are 4533 * ignored, and BAS[3:0] define which bytes to watch. 4534 */ 4535 bas &= 0xf; 4536 } 4537 /* The BAS bits are supposed to be programmed to indicate a contiguous 4538 * range of bytes. 
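 * For a well-formed contiguous BAS the code below recovers the watched
 * range with ctz32()/cto32(); e.g. BAS == 0b01111000 gives basstart == 3
 * and len == 4, i.e. a 4-byte watchpoint at WVR + 3.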
Otherwise it is CONSTRAINED UNPREDICTABLE whether 4539 * we fire for each byte in the word/doubleword addressed by the WVR. 4540 * We choose to ignore any non-zero bits after the first range of 1s. 4541 */ 4542 basstart = ctz32(bas); 4543 len = cto32(bas >> basstart); 4544 wvr += basstart; 4545 } 4546 4547 cpu_watchpoint_insert(CPU(cpu), wvr, len, flags, 4548 &env->cpu_watchpoint[n]); 4549 } 4550 4551 void hw_watchpoint_update_all(ARMCPU *cpu) 4552 { 4553 int i; 4554 CPUARMState *env = &cpu->env; 4555 4556 /* Completely clear out existing QEMU watchpoints and our array, to 4557 * avoid possible stale entries following migration load. 4558 */ 4559 cpu_watchpoint_remove_all(CPU(cpu), BP_CPU); 4560 memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint)); 4561 4562 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) { 4563 hw_watchpoint_update(cpu, i); 4564 } 4565 } 4566 4567 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4568 uint64_t value) 4569 { 4570 ARMCPU *cpu = arm_env_get_cpu(env); 4571 int i = ri->crm; 4572 4573 /* Bits [63:49] are hardwired to the value of bit [48]; that is, the 4574 * register reads and behaves as if values written are sign extended. 4575 * Bits [1:0] are RES0. 4576 */ 4577 value = sextract64(value, 0, 49) & ~3ULL; 4578 4579 raw_write(env, ri, value); 4580 hw_watchpoint_update(cpu, i); 4581 } 4582 4583 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4584 uint64_t value) 4585 { 4586 ARMCPU *cpu = arm_env_get_cpu(env); 4587 int i = ri->crm; 4588 4589 raw_write(env, ri, value); 4590 hw_watchpoint_update(cpu, i); 4591 } 4592 4593 void hw_breakpoint_update(ARMCPU *cpu, int n) 4594 { 4595 CPUARMState *env = &cpu->env; 4596 uint64_t bvr = env->cp15.dbgbvr[n]; 4597 uint64_t bcr = env->cp15.dbgbcr[n]; 4598 vaddr addr; 4599 int bt; 4600 int flags = BP_CPU; 4601 4602 if (env->cpu_breakpoint[n]) { 4603 cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]); 4604 env->cpu_breakpoint[n] = NULL; 4605 } 4606 4607 if (!extract64(bcr, 0, 1)) { 4608 /* E bit clear : watchpoint disabled */ 4609 return; 4610 } 4611 4612 bt = extract64(bcr, 20, 4); 4613 4614 switch (bt) { 4615 case 4: /* unlinked address mismatch (reserved if AArch64) */ 4616 case 5: /* linked address mismatch (reserved if AArch64) */ 4617 qemu_log_mask(LOG_UNIMP, 4618 "arm: address mismatch breakpoint types not implemented\n"); 4619 return; 4620 case 0: /* unlinked address match */ 4621 case 1: /* linked address match */ 4622 { 4623 /* Bits [63:49] are hardwired to the value of bit [48]; that is, 4624 * we behave as if the register was sign extended. Bits [1:0] are 4625 * RES0. The BAS field is used to allow setting breakpoints on 16 4626 * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether 4627 * a bp will fire if the addresses covered by the bp and the addresses 4628 * covered by the insn overlap but the insn doesn't start at the 4629 * start of the bp address range. We choose to require the insn and 4630 * the bp to have the same address. The constraints on writing to 4631 * BAS enforced in dbgbcr_write mean we have only four cases: 4632 * 0b0000 => no breakpoint 4633 * 0b0011 => breakpoint on addr 4634 * 0b1100 => breakpoint on addr + 2 4635 * 0b1111 => breakpoint on addr 4636 * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c). 
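 *
 * A worked example with made-up values: bvr == 0x40100c together with
 * BAS == 0b1100 decodes below to addr == 0x40100c + 2 == 0x40100e, i.e. a
 * breakpoint on the second halfword of that word.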
4637 */ 4638 int bas = extract64(bcr, 5, 4); 4639 addr = sextract64(bvr, 0, 49) & ~3ULL; 4640 if (bas == 0) { 4641 return; 4642 } 4643 if (bas == 0xc) { 4644 addr += 2; 4645 } 4646 break; 4647 } 4648 case 2: /* unlinked context ID match */ 4649 case 8: /* unlinked VMID match (reserved if no EL2) */ 4650 case 10: /* unlinked context ID and VMID match (reserved if no EL2) */ 4651 qemu_log_mask(LOG_UNIMP, 4652 "arm: unlinked context breakpoint types not implemented\n"); 4653 return; 4654 case 9: /* linked VMID match (reserved if no EL2) */ 4655 case 11: /* linked context ID and VMID match (reserved if no EL2) */ 4656 case 3: /* linked context ID match */ 4657 default: 4658 /* We must generate no events for Linked context matches (unless 4659 * they are linked to by some other bp/wp, which is handled in 4660 * updates for the linking bp/wp). We choose to also generate no events 4661 * for reserved values. 4662 */ 4663 return; 4664 } 4665 4666 cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]); 4667 } 4668 4669 void hw_breakpoint_update_all(ARMCPU *cpu) 4670 { 4671 int i; 4672 CPUARMState *env = &cpu->env; 4673 4674 /* Completely clear out existing QEMU breakpoints and our array, to 4675 * avoid possible stale entries following migration load. 4676 */ 4677 cpu_breakpoint_remove_all(CPU(cpu), BP_CPU); 4678 memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint)); 4679 4680 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) { 4681 hw_breakpoint_update(cpu, i); 4682 } 4683 } 4684 4685 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4686 uint64_t value) 4687 { 4688 ARMCPU *cpu = arm_env_get_cpu(env); 4689 int i = ri->crm; 4690 4691 raw_write(env, ri, value); 4692 hw_breakpoint_update(cpu, i); 4693 } 4694 4695 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4696 uint64_t value) 4697 { 4698 ARMCPU *cpu = arm_env_get_cpu(env); 4699 int i = ri->crm; 4700 4701 /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only 4702 * copy of BAS[0]. 4703 */ 4704 value = deposit64(value, 6, 1, extract64(value, 5, 1)); 4705 value = deposit64(value, 8, 1, extract64(value, 7, 1)); 4706 4707 raw_write(env, ri, value); 4708 hw_breakpoint_update(cpu, i); 4709 } 4710 4711 static void define_debug_regs(ARMCPU *cpu) 4712 { 4713 /* Define v7 and v8 architectural debug registers. 4714 * These are just dummy implementations for now. 4715 */ 4716 int i; 4717 int wrps, brps, ctx_cmps; 4718 ARMCPRegInfo dbgdidr = { 4719 .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, 4720 .access = PL0_R, .accessfn = access_tda, 4721 .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr, 4722 }; 4723 4724 /* Note that all these register fields hold "number of Xs minus 1". */ 4725 brps = extract32(cpu->dbgdidr, 24, 4); 4726 wrps = extract32(cpu->dbgdidr, 28, 4); 4727 ctx_cmps = extract32(cpu->dbgdidr, 20, 4); 4728 4729 assert(ctx_cmps <= brps); 4730 4731 /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties 4732 * of the debug registers such as number of breakpoints; 4733 * check that if they both exist then they agree. 
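* (Concretely, the extract32() calls below read BRPs from DBGDIDR[27:24], WRPs from DBGDIDR[31:28] and CTX_CMPs from DBGDIDR[23:20], and the asserts compare them with ID_AA64DFR0_EL1[15:12], [23:20] and [31:28] respectively.)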
4734 */ 4735 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { 4736 assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps); 4737 assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps); 4738 assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps); 4739 } 4740 4741 define_one_arm_cp_reg(cpu, &dbgdidr); 4742 define_arm_cp_regs(cpu, debug_cp_reginfo); 4743 4744 if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { 4745 define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); 4746 } 4747 4748 for (i = 0; i < brps + 1; i++) { 4749 ARMCPRegInfo dbgregs[] = { 4750 { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH, 4751 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4, 4752 .access = PL1_RW, .accessfn = access_tda, 4753 .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]), 4754 .writefn = dbgbvr_write, .raw_writefn = raw_write 4755 }, 4756 { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH, 4757 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5, 4758 .access = PL1_RW, .accessfn = access_tda, 4759 .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]), 4760 .writefn = dbgbcr_write, .raw_writefn = raw_write 4761 }, 4762 REGINFO_SENTINEL 4763 }; 4764 define_arm_cp_regs(cpu, dbgregs); 4765 } 4766 4767 for (i = 0; i < wrps + 1; i++) { 4768 ARMCPRegInfo dbgregs[] = { 4769 { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH, 4770 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6, 4771 .access = PL1_RW, .accessfn = access_tda, 4772 .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]), 4773 .writefn = dbgwvr_write, .raw_writefn = raw_write 4774 }, 4775 { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH, 4776 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7, 4777 .access = PL1_RW, .accessfn = access_tda, 4778 .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]), 4779 .writefn = dbgwcr_write, .raw_writefn = raw_write 4780 }, 4781 REGINFO_SENTINEL 4782 }; 4783 define_arm_cp_regs(cpu, dbgregs); 4784 } 4785 } 4786 4787 /* We don't know until after realize whether there's a GICv3 4788 * attached, and that is what registers the gicv3 sysregs. 4789 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1 4790 * at runtime. 4791 */ 4792 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) 4793 { 4794 ARMCPU *cpu = arm_env_get_cpu(env); 4795 uint64_t pfr1 = cpu->id_pfr1; 4796 4797 if (env->gicv3state) { 4798 pfr1 |= 1 << 28; 4799 } 4800 return pfr1; 4801 } 4802 4803 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) 4804 { 4805 ARMCPU *cpu = arm_env_get_cpu(env); 4806 uint64_t pfr0 = cpu->id_aa64pfr0; 4807 4808 if (env->gicv3state) { 4809 pfr0 |= 1 << 24; 4810 } 4811 return pfr0; 4812 } 4813 4814 void register_cp_regs_for_features(ARMCPU *cpu) 4815 { 4816 /* Register all the coprocessor registers based on feature bits */ 4817 CPUARMState *env = &cpu->env; 4818 if (arm_feature(env, ARM_FEATURE_M)) { 4819 /* M profile has no coprocessor registers */ 4820 return; 4821 } 4822 4823 define_arm_cp_regs(cpu, cp_reginfo); 4824 if (!arm_feature(env, ARM_FEATURE_V8)) { 4825 /* Must go early as it is full of wildcards that may be 4826 * overridden by later definitions. 
4827 */ 4828 define_arm_cp_regs(cpu, not_v8_cp_reginfo); 4829 } 4830 4831 if (arm_feature(env, ARM_FEATURE_V6)) { 4832 /* The ID registers all have impdef reset values */ 4833 ARMCPRegInfo v6_idregs[] = { 4834 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH, 4835 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 4836 .access = PL1_R, .type = ARM_CP_CONST, 4837 .resetvalue = cpu->id_pfr0 }, 4838 /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know 4839 * the value of the GIC field until after we define these regs. 4840 */ 4841 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH, 4842 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1, 4843 .access = PL1_R, .type = ARM_CP_NO_RAW, 4844 .readfn = id_pfr1_read, 4845 .writefn = arm_cp_write_ignore }, 4846 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH, 4847 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2, 4848 .access = PL1_R, .type = ARM_CP_CONST, 4849 .resetvalue = cpu->id_dfr0 }, 4850 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH, 4851 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3, 4852 .access = PL1_R, .type = ARM_CP_CONST, 4853 .resetvalue = cpu->id_afr0 }, 4854 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH, 4855 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4, 4856 .access = PL1_R, .type = ARM_CP_CONST, 4857 .resetvalue = cpu->id_mmfr0 }, 4858 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH, 4859 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5, 4860 .access = PL1_R, .type = ARM_CP_CONST, 4861 .resetvalue = cpu->id_mmfr1 }, 4862 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH, 4863 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6, 4864 .access = PL1_R, .type = ARM_CP_CONST, 4865 .resetvalue = cpu->id_mmfr2 }, 4866 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH, 4867 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7, 4868 .access = PL1_R, .type = ARM_CP_CONST, 4869 .resetvalue = cpu->id_mmfr3 }, 4870 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH, 4871 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 4872 .access = PL1_R, .type = ARM_CP_CONST, 4873 .resetvalue = cpu->id_isar0 }, 4874 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH, 4875 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1, 4876 .access = PL1_R, .type = ARM_CP_CONST, 4877 .resetvalue = cpu->id_isar1 }, 4878 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH, 4879 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 4880 .access = PL1_R, .type = ARM_CP_CONST, 4881 .resetvalue = cpu->id_isar2 }, 4882 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH, 4883 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3, 4884 .access = PL1_R, .type = ARM_CP_CONST, 4885 .resetvalue = cpu->id_isar3 }, 4886 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH, 4887 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4, 4888 .access = PL1_R, .type = ARM_CP_CONST, 4889 .resetvalue = cpu->id_isar4 }, 4890 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH, 4891 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5, 4892 .access = PL1_R, .type = ARM_CP_CONST, 4893 .resetvalue = cpu->id_isar5 }, 4894 { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH, 4895 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6, 4896 .access = PL1_R, .type = ARM_CP_CONST, 4897 .resetvalue = cpu->id_mmfr4 }, 4898 { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH, 4899 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7, 4900 .access = PL1_R, .type = ARM_CP_CONST, 4901 .resetvalue = cpu->id_isar6 }, 4902 REGINFO_SENTINEL 4903 }; 4904 define_arm_cp_regs(cpu, v6_idregs); 4905 
define_arm_cp_regs(cpu, v6_cp_reginfo); 4906 } else { 4907 define_arm_cp_regs(cpu, not_v6_cp_reginfo); 4908 } 4909 if (arm_feature(env, ARM_FEATURE_V6K)) { 4910 define_arm_cp_regs(cpu, v6k_cp_reginfo); 4911 } 4912 if (arm_feature(env, ARM_FEATURE_V7MP) && 4913 !arm_feature(env, ARM_FEATURE_PMSA)) { 4914 define_arm_cp_regs(cpu, v7mp_cp_reginfo); 4915 } 4916 if (arm_feature(env, ARM_FEATURE_V7)) { 4917 /* v7 performance monitor control register: same implementor 4918 * field as main ID register, and we implement only the cycle 4919 * count register. 4920 */ 4921 #ifndef CONFIG_USER_ONLY 4922 ARMCPRegInfo pmcr = { 4923 .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0, 4924 .access = PL0_RW, 4925 .type = ARM_CP_IO | ARM_CP_ALIAS, 4926 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr), 4927 .accessfn = pmreg_access, .writefn = pmcr_write, 4928 .raw_writefn = raw_write, 4929 }; 4930 ARMCPRegInfo pmcr64 = { 4931 .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64, 4932 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0, 4933 .access = PL0_RW, .accessfn = pmreg_access, 4934 .type = ARM_CP_IO, 4935 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), 4936 .resetvalue = cpu->midr & 0xff000000, 4937 .writefn = pmcr_write, .raw_writefn = raw_write, 4938 }; 4939 define_one_arm_cp_reg(cpu, &pmcr); 4940 define_one_arm_cp_reg(cpu, &pmcr64); 4941 #endif 4942 ARMCPRegInfo clidr = { 4943 .name = "CLIDR", .state = ARM_CP_STATE_BOTH, 4944 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1, 4945 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr 4946 }; 4947 define_one_arm_cp_reg(cpu, &clidr); 4948 define_arm_cp_regs(cpu, v7_cp_reginfo); 4949 define_debug_regs(cpu); 4950 } else { 4951 define_arm_cp_regs(cpu, not_v7_cp_reginfo); 4952 } 4953 if (arm_feature(env, ARM_FEATURE_V8)) { 4954 /* AArch64 ID registers, which all have impdef reset values. 4955 * Note that within the ID register ranges the unused slots 4956 * must all RAZ, not UNDEF; future architecture versions may 4957 * define new registers here. 4958 */ 4959 ARMCPRegInfo v8_idregs[] = { 4960 /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't 4961 * know the right value for the GIC field until after we 4962 * define these regs. 
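* (id_aa64pfr0_read above supplies that field, setting bit 24 of the register when a GICv3 is attached.)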
4963 */ 4964 { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64, 4965 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0, 4966 .access = PL1_R, .type = ARM_CP_NO_RAW, 4967 .readfn = id_aa64pfr0_read, 4968 .writefn = arm_cp_write_ignore }, 4969 { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64, 4970 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1, 4971 .access = PL1_R, .type = ARM_CP_CONST, 4972 .resetvalue = cpu->id_aa64pfr1}, 4973 { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4974 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2, 4975 .access = PL1_R, .type = ARM_CP_CONST, 4976 .resetvalue = 0 }, 4977 { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4978 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3, 4979 .access = PL1_R, .type = ARM_CP_CONST, 4980 .resetvalue = 0 }, 4981 { .name = "ID_AA64PFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4982 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4, 4983 .access = PL1_R, .type = ARM_CP_CONST, 4984 .resetvalue = 0 }, 4985 { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4986 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5, 4987 .access = PL1_R, .type = ARM_CP_CONST, 4988 .resetvalue = 0 }, 4989 { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4990 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6, 4991 .access = PL1_R, .type = ARM_CP_CONST, 4992 .resetvalue = 0 }, 4993 { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4994 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7, 4995 .access = PL1_R, .type = ARM_CP_CONST, 4996 .resetvalue = 0 }, 4997 { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64, 4998 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0, 4999 .access = PL1_R, .type = ARM_CP_CONST, 5000 .resetvalue = cpu->id_aa64dfr0 }, 5001 { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64, 5002 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1, 5003 .access = PL1_R, .type = ARM_CP_CONST, 5004 .resetvalue = cpu->id_aa64dfr1 }, 5005 { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5006 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2, 5007 .access = PL1_R, .type = ARM_CP_CONST, 5008 .resetvalue = 0 }, 5009 { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5010 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3, 5011 .access = PL1_R, .type = ARM_CP_CONST, 5012 .resetvalue = 0 }, 5013 { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64, 5014 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4, 5015 .access = PL1_R, .type = ARM_CP_CONST, 5016 .resetvalue = cpu->id_aa64afr0 }, 5017 { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64, 5018 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5, 5019 .access = PL1_R, .type = ARM_CP_CONST, 5020 .resetvalue = cpu->id_aa64afr1 }, 5021 { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5022 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6, 5023 .access = PL1_R, .type = ARM_CP_CONST, 5024 .resetvalue = 0 }, 5025 { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5026 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7, 5027 .access = PL1_R, .type = ARM_CP_CONST, 5028 .resetvalue = 0 }, 5029 { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64, 5030 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0, 5031 .access = PL1_R, .type = ARM_CP_CONST, 5032 .resetvalue = cpu->id_aa64isar0 }, 5033 { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64, 5034 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1, 
5035 .access = PL1_R, .type = ARM_CP_CONST, 5036 .resetvalue = cpu->id_aa64isar1 }, 5037 { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5038 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2, 5039 .access = PL1_R, .type = ARM_CP_CONST, 5040 .resetvalue = 0 }, 5041 { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5042 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3, 5043 .access = PL1_R, .type = ARM_CP_CONST, 5044 .resetvalue = 0 }, 5045 { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5046 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4, 5047 .access = PL1_R, .type = ARM_CP_CONST, 5048 .resetvalue = 0 }, 5049 { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5050 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5, 5051 .access = PL1_R, .type = ARM_CP_CONST, 5052 .resetvalue = 0 }, 5053 { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5054 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6, 5055 .access = PL1_R, .type = ARM_CP_CONST, 5056 .resetvalue = 0 }, 5057 { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5058 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7, 5059 .access = PL1_R, .type = ARM_CP_CONST, 5060 .resetvalue = 0 }, 5061 { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64, 5062 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 5063 .access = PL1_R, .type = ARM_CP_CONST, 5064 .resetvalue = cpu->id_aa64mmfr0 }, 5065 { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64, 5066 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1, 5067 .access = PL1_R, .type = ARM_CP_CONST, 5068 .resetvalue = cpu->id_aa64mmfr1 }, 5069 { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5070 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2, 5071 .access = PL1_R, .type = ARM_CP_CONST, 5072 .resetvalue = 0 }, 5073 { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5074 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3, 5075 .access = PL1_R, .type = ARM_CP_CONST, 5076 .resetvalue = 0 }, 5077 { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5078 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4, 5079 .access = PL1_R, .type = ARM_CP_CONST, 5080 .resetvalue = 0 }, 5081 { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5082 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5, 5083 .access = PL1_R, .type = ARM_CP_CONST, 5084 .resetvalue = 0 }, 5085 { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5086 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6, 5087 .access = PL1_R, .type = ARM_CP_CONST, 5088 .resetvalue = 0 }, 5089 { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5090 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7, 5091 .access = PL1_R, .type = ARM_CP_CONST, 5092 .resetvalue = 0 }, 5093 { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64, 5094 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0, 5095 .access = PL1_R, .type = ARM_CP_CONST, 5096 .resetvalue = cpu->mvfr0 }, 5097 { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64, 5098 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1, 5099 .access = PL1_R, .type = ARM_CP_CONST, 5100 .resetvalue = cpu->mvfr1 }, 5101 { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64, 5102 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, 5103 .access = PL1_R, .type = ARM_CP_CONST, 5104 .resetvalue = cpu->mvfr2 }, 5105 { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5106 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, 
.opc2 = 3, 5107 .access = PL1_R, .type = ARM_CP_CONST, 5108 .resetvalue = 0 }, 5109 { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5110 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4, 5111 .access = PL1_R, .type = ARM_CP_CONST, 5112 .resetvalue = 0 }, 5113 { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5114 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5, 5115 .access = PL1_R, .type = ARM_CP_CONST, 5116 .resetvalue = 0 }, 5117 { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5118 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6, 5119 .access = PL1_R, .type = ARM_CP_CONST, 5120 .resetvalue = 0 }, 5121 { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5122 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7, 5123 .access = PL1_R, .type = ARM_CP_CONST, 5124 .resetvalue = 0 }, 5125 { .name = "PMCEID0", .state = ARM_CP_STATE_AA32, 5126 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6, 5127 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 5128 .resetvalue = cpu->pmceid0 }, 5129 { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64, 5130 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6, 5131 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 5132 .resetvalue = cpu->pmceid0 }, 5133 { .name = "PMCEID1", .state = ARM_CP_STATE_AA32, 5134 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7, 5135 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 5136 .resetvalue = cpu->pmceid1 }, 5137 { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64, 5138 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7, 5139 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 5140 .resetvalue = cpu->pmceid1 }, 5141 REGINFO_SENTINEL 5142 }; 5143 /* RVBAR_EL1 is only implemented if EL1 is the highest EL */ 5144 if (!arm_feature(env, ARM_FEATURE_EL3) && 5145 !arm_feature(env, ARM_FEATURE_EL2)) { 5146 ARMCPRegInfo rvbar = { 5147 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64, 5148 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 5149 .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar 5150 }; 5151 define_one_arm_cp_reg(cpu, &rvbar); 5152 } 5153 define_arm_cp_regs(cpu, v8_idregs); 5154 define_arm_cp_regs(cpu, v8_cp_reginfo); 5155 } 5156 if (arm_feature(env, ARM_FEATURE_EL2)) { 5157 uint64_t vmpidr_def = mpidr_read_val(env); 5158 ARMCPRegInfo vpidr_regs[] = { 5159 { .name = "VPIDR", .state = ARM_CP_STATE_AA32, 5160 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 5161 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5162 .resetvalue = cpu->midr, .type = ARM_CP_ALIAS, 5163 .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) }, 5164 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64, 5165 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 5166 .access = PL2_RW, .resetvalue = cpu->midr, 5167 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, 5168 { .name = "VMPIDR", .state = ARM_CP_STATE_AA32, 5169 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 5170 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5171 .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS, 5172 .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) }, 5173 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64, 5174 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 5175 .access = PL2_RW, 5176 .resetvalue = vmpidr_def, 5177 .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) }, 5178 REGINFO_SENTINEL 5179 }; 5180 define_arm_cp_regs(cpu, vpidr_regs); 5181 define_arm_cp_regs(cpu, el2_cp_reginfo); 5182 /* RVBAR_EL2 is 
only implemented if EL2 is the highest EL */ 5183 if (!arm_feature(env, ARM_FEATURE_EL3)) { 5184 ARMCPRegInfo rvbar = { 5185 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64, 5186 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1, 5187 .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar 5188 }; 5189 define_one_arm_cp_reg(cpu, &rvbar); 5190 } 5191 } else { 5192 /* If EL2 is missing but higher ELs are enabled, we need to 5193 * register the no_el2 reginfos. 5194 */ 5195 if (arm_feature(env, ARM_FEATURE_EL3)) { 5196 /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value 5197 * of MIDR_EL1 and MPIDR_EL1. 5198 */ 5199 ARMCPRegInfo vpidr_regs[] = { 5200 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH, 5201 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 5202 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 5203 .type = ARM_CP_CONST, .resetvalue = cpu->midr, 5204 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, 5205 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH, 5206 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 5207 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 5208 .type = ARM_CP_NO_RAW, 5209 .writefn = arm_cp_write_ignore, .readfn = mpidr_read }, 5210 REGINFO_SENTINEL 5211 }; 5212 define_arm_cp_regs(cpu, vpidr_regs); 5213 define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo); 5214 } 5215 } 5216 if (arm_feature(env, ARM_FEATURE_EL3)) { 5217 define_arm_cp_regs(cpu, el3_cp_reginfo); 5218 ARMCPRegInfo el3_regs[] = { 5219 { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64, 5220 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1, 5221 .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar }, 5222 { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64, 5223 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0, 5224 .access = PL3_RW, 5225 .raw_writefn = raw_write, .writefn = sctlr_write, 5226 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]), 5227 .resetvalue = cpu->reset_sctlr }, 5228 REGINFO_SENTINEL 5229 }; 5230 5231 define_arm_cp_regs(cpu, el3_regs); 5232 } 5233 /* The behaviour of NSACR is sufficiently various that we don't 5234 * try to describe it in a single reginfo: 5235 * if EL3 is 64 bit, then trap to EL3 from S EL1, 5236 * reads as constant 0xc00 from NS EL1 and NS EL2 5237 * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2 5238 * if v7 without EL3, register doesn't exist 5239 * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2 5240 */ 5241 if (arm_feature(env, ARM_FEATURE_EL3)) { 5242 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 5243 ARMCPRegInfo nsacr = { 5244 .name = "NSACR", .type = ARM_CP_CONST, 5245 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 5246 .access = PL1_RW, .accessfn = nsacr_access, 5247 .resetvalue = 0xc00 5248 }; 5249 define_one_arm_cp_reg(cpu, &nsacr); 5250 } else { 5251 ARMCPRegInfo nsacr = { 5252 .name = "NSACR", 5253 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 5254 .access = PL3_RW | PL1_R, 5255 .resetvalue = 0, 5256 .fieldoffset = offsetof(CPUARMState, cp15.nsacr) 5257 }; 5258 define_one_arm_cp_reg(cpu, &nsacr); 5259 } 5260 } else { 5261 if (arm_feature(env, ARM_FEATURE_V8)) { 5262 ARMCPRegInfo nsacr = { 5263 .name = "NSACR", .type = ARM_CP_CONST, 5264 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 5265 .access = PL1_R, 5266 .resetvalue = 0xc00 5267 }; 5268 define_one_arm_cp_reg(cpu, &nsacr); 5269 } 5270 } 5271 5272 if (arm_feature(env, ARM_FEATURE_PMSA)) { 5273 if (arm_feature(env, ARM_FEATURE_V6)) { 5274 /* PMSAv6 not implemented */ 5275 
assert(arm_feature(env, ARM_FEATURE_V7)); 5276 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); 5277 define_arm_cp_regs(cpu, pmsav7_cp_reginfo); 5278 } else { 5279 define_arm_cp_regs(cpu, pmsav5_cp_reginfo); 5280 } 5281 } else { 5282 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); 5283 define_arm_cp_regs(cpu, vmsa_cp_reginfo); 5284 } 5285 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) { 5286 define_arm_cp_regs(cpu, t2ee_cp_reginfo); 5287 } 5288 if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { 5289 define_arm_cp_regs(cpu, generic_timer_cp_reginfo); 5290 } 5291 if (arm_feature(env, ARM_FEATURE_VAPA)) { 5292 define_arm_cp_regs(cpu, vapa_cp_reginfo); 5293 } 5294 if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) { 5295 define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo); 5296 } 5297 if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) { 5298 define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo); 5299 } 5300 if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) { 5301 define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo); 5302 } 5303 if (arm_feature(env, ARM_FEATURE_OMAPCP)) { 5304 define_arm_cp_regs(cpu, omap_cp_reginfo); 5305 } 5306 if (arm_feature(env, ARM_FEATURE_STRONGARM)) { 5307 define_arm_cp_regs(cpu, strongarm_cp_reginfo); 5308 } 5309 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 5310 define_arm_cp_regs(cpu, xscale_cp_reginfo); 5311 } 5312 if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) { 5313 define_arm_cp_regs(cpu, dummy_c15_cp_reginfo); 5314 } 5315 if (arm_feature(env, ARM_FEATURE_LPAE)) { 5316 define_arm_cp_regs(cpu, lpae_cp_reginfo); 5317 } 5318 /* Slightly awkwardly, the OMAP and StrongARM cores need all of 5319 * cp15 crn=0 to be writes-ignored, whereas for other cores they should 5320 * be read-only (ie write causes UNDEF exception). 5321 */ 5322 { 5323 ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = { 5324 /* Pre-v8 MIDR space. 5325 * Note that the MIDR isn't a simple constant register because 5326 * of the TI925 behaviour where writes to another register can 5327 * cause the MIDR value to change. 5328 * 5329 * Unimplemented registers in the c15 0 0 0 space default to 5330 * MIDR. Define MIDR first as this entire space, then CTR, TCMTR 5331 * and friends override accordingly. 5332 */ 5333 { .name = "MIDR", 5334 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY, 5335 .access = PL1_R, .resetvalue = cpu->midr, 5336 .writefn = arm_cp_write_ignore, .raw_writefn = raw_write, 5337 .readfn = midr_read, 5338 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 5339 .type = ARM_CP_OVERRIDE }, 5340 /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. 
*/ 5341 { .name = "DUMMY", 5342 .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY, 5343 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5344 { .name = "DUMMY", 5345 .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY, 5346 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5347 { .name = "DUMMY", 5348 .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY, 5349 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5350 { .name = "DUMMY", 5351 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY, 5352 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5353 { .name = "DUMMY", 5354 .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY, 5355 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5356 REGINFO_SENTINEL 5357 }; 5358 ARMCPRegInfo id_v8_midr_cp_reginfo[] = { 5359 { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH, 5360 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0, 5361 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr, 5362 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 5363 .readfn = midr_read }, 5364 /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */ 5365 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 5366 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 5367 .access = PL1_R, .resetvalue = cpu->midr }, 5368 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 5369 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7, 5370 .access = PL1_R, .resetvalue = cpu->midr }, 5371 { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH, 5372 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6, 5373 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr }, 5374 REGINFO_SENTINEL 5375 }; 5376 ARMCPRegInfo id_cp_reginfo[] = { 5377 /* These are common to v8 and pre-v8 */ 5378 { .name = "CTR", 5379 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1, 5380 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 5381 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64, 5382 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0, 5383 .access = PL0_R, .accessfn = ctr_el0_access, 5384 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 5385 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */ 5386 { .name = "TCMTR", 5387 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2, 5388 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5389 REGINFO_SENTINEL 5390 }; 5391 /* TLBTR is specific to VMSA */ 5392 ARMCPRegInfo id_tlbtr_reginfo = { 5393 .name = "TLBTR", 5394 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3, 5395 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0, 5396 }; 5397 /* MPUIR is specific to PMSA V6+ */ 5398 ARMCPRegInfo id_mpuir_reginfo = { 5399 .name = "MPUIR", 5400 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 5401 .access = PL1_R, .type = ARM_CP_CONST, 5402 .resetvalue = cpu->pmsav7_dregion << 8 5403 }; 5404 ARMCPRegInfo crn0_wi_reginfo = { 5405 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY, 5406 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W, 5407 .type = ARM_CP_NOP | ARM_CP_OVERRIDE 5408 }; 5409 if (arm_feature(env, ARM_FEATURE_OMAPCP) || 5410 arm_feature(env, ARM_FEATURE_STRONGARM)) { 5411 ARMCPRegInfo *r; 5412 /* Register the blanket "writes ignored" value first to cover the 5413 * whole space. Then update the specific ID registers to allow write 5414 * access, so that they ignore writes rather than causing them to 5415 * UNDEF. 
5416 */ 5417 define_one_arm_cp_reg(cpu, &crn0_wi_reginfo); 5418 for (r = id_pre_v8_midr_cp_reginfo; 5419 r->type != ARM_CP_SENTINEL; r++) { 5420 r->access = PL1_RW; 5421 } 5422 for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) { 5423 r->access = PL1_RW; 5424 } 5425 id_mpuir_reginfo.access = PL1_RW; 5426 id_tlbtr_reginfo.access = PL1_RW; 5427 } 5428 if (arm_feature(env, ARM_FEATURE_V8)) { 5429 define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo); 5430 } else { 5431 define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo); 5432 } 5433 define_arm_cp_regs(cpu, id_cp_reginfo); 5434 if (!arm_feature(env, ARM_FEATURE_PMSA)) { 5435 define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo); 5436 } else if (arm_feature(env, ARM_FEATURE_V7)) { 5437 define_one_arm_cp_reg(cpu, &id_mpuir_reginfo); 5438 } 5439 } 5440 5441 if (arm_feature(env, ARM_FEATURE_MPIDR)) { 5442 define_arm_cp_regs(cpu, mpidr_cp_reginfo); 5443 } 5444 5445 if (arm_feature(env, ARM_FEATURE_AUXCR)) { 5446 ARMCPRegInfo auxcr_reginfo[] = { 5447 { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH, 5448 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1, 5449 .access = PL1_RW, .type = ARM_CP_CONST, 5450 .resetvalue = cpu->reset_auxcr }, 5451 { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH, 5452 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1, 5453 .access = PL2_RW, .type = ARM_CP_CONST, 5454 .resetvalue = 0 }, 5455 { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64, 5456 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1, 5457 .access = PL3_RW, .type = ARM_CP_CONST, 5458 .resetvalue = 0 }, 5459 REGINFO_SENTINEL 5460 }; 5461 define_arm_cp_regs(cpu, auxcr_reginfo); 5462 } 5463 5464 if (arm_feature(env, ARM_FEATURE_CBAR)) { 5465 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 5466 /* 32 bit view is [31:18] 0...0 [43:32]. 
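For an assumed, purely illustrative reset_cbar of 0x7_3000_0000, bits [31:18] contribute 0x30000000 and bits [43:32] contribute 0x7, so the cbar32 computed below is 0x30000007.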
*/ 5467 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18) 5468 | extract64(cpu->reset_cbar, 32, 12); 5469 ARMCPRegInfo cbar_reginfo[] = { 5470 { .name = "CBAR", 5471 .type = ARM_CP_CONST, 5472 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, 5473 .access = PL1_R, .resetvalue = cpu->reset_cbar }, 5474 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64, 5475 .type = ARM_CP_CONST, 5476 .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0, 5477 .access = PL1_R, .resetvalue = cbar32 }, 5478 REGINFO_SENTINEL 5479 }; 5480 /* We don't implement a r/w 64 bit CBAR currently */ 5481 assert(arm_feature(env, ARM_FEATURE_CBAR_RO)); 5482 define_arm_cp_regs(cpu, cbar_reginfo); 5483 } else { 5484 ARMCPRegInfo cbar = { 5485 .name = "CBAR", 5486 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, 5487 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar, 5488 .fieldoffset = offsetof(CPUARMState, 5489 cp15.c15_config_base_address) 5490 }; 5491 if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { 5492 cbar.access = PL1_R; 5493 cbar.fieldoffset = 0; 5494 cbar.type = ARM_CP_CONST; 5495 } 5496 define_one_arm_cp_reg(cpu, &cbar); 5497 } 5498 } 5499 5500 if (arm_feature(env, ARM_FEATURE_VBAR)) { 5501 ARMCPRegInfo vbar_cp_reginfo[] = { 5502 { .name = "VBAR", .state = ARM_CP_STATE_BOTH, 5503 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0, 5504 .access = PL1_RW, .writefn = vbar_write, 5505 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s), 5506 offsetof(CPUARMState, cp15.vbar_ns) }, 5507 .resetvalue = 0 }, 5508 REGINFO_SENTINEL 5509 }; 5510 define_arm_cp_regs(cpu, vbar_cp_reginfo); 5511 } 5512 5513 /* Generic registers whose values depend on the implementation */ 5514 { 5515 ARMCPRegInfo sctlr = { 5516 .name = "SCTLR", .state = ARM_CP_STATE_BOTH, 5517 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 5518 .access = PL1_RW, 5519 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s), 5520 offsetof(CPUARMState, cp15.sctlr_ns) }, 5521 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr, 5522 .raw_writefn = raw_write, 5523 }; 5524 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 5525 /* Normally we would always end the TB on an SCTLR write, but Linux 5526 * arch/arm/mach-pxa/sleep.S expects two instructions following 5527 * an MMU enable to execute from cache. Imitate this behaviour. 
5528 */ 5529 sctlr.type |= ARM_CP_SUPPRESS_TB_END; 5530 } 5531 define_one_arm_cp_reg(cpu, &sctlr); 5532 } 5533 5534 if (arm_feature(env, ARM_FEATURE_SVE)) { 5535 define_one_arm_cp_reg(cpu, &zcr_el1_reginfo); 5536 if (arm_feature(env, ARM_FEATURE_EL2)) { 5537 define_one_arm_cp_reg(cpu, &zcr_el2_reginfo); 5538 } else { 5539 define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo); 5540 } 5541 if (arm_feature(env, ARM_FEATURE_EL3)) { 5542 define_one_arm_cp_reg(cpu, &zcr_el3_reginfo); 5543 } 5544 } 5545 } 5546 5547 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) 5548 { 5549 CPUState *cs = CPU(cpu); 5550 CPUARMState *env = &cpu->env; 5551 5552 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 5553 gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg, 5554 aarch64_fpu_gdb_set_reg, 5555 34, "aarch64-fpu.xml", 0); 5556 } else if (arm_feature(env, ARM_FEATURE_NEON)) { 5557 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 5558 51, "arm-neon.xml", 0); 5559 } else if (arm_feature(env, ARM_FEATURE_VFP3)) { 5560 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 5561 35, "arm-vfp3.xml", 0); 5562 } else if (arm_feature(env, ARM_FEATURE_VFP)) { 5563 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 5564 19, "arm-vfp.xml", 0); 5565 } 5566 gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg, 5567 arm_gen_dynamic_xml(cs), 5568 "system-registers.xml", 0); 5569 } 5570 5571 /* Sort alphabetically by type name, except for "any". */ 5572 static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b) 5573 { 5574 ObjectClass *class_a = (ObjectClass *)a; 5575 ObjectClass *class_b = (ObjectClass *)b; 5576 const char *name_a, *name_b; 5577 5578 name_a = object_class_get_name(class_a); 5579 name_b = object_class_get_name(class_b); 5580 if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) { 5581 return 1; 5582 } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) { 5583 return -1; 5584 } else { 5585 return strcmp(name_a, name_b); 5586 } 5587 } 5588 5589 static void arm_cpu_list_entry(gpointer data, gpointer user_data) 5590 { 5591 ObjectClass *oc = data; 5592 CPUListState *s = user_data; 5593 const char *typename; 5594 char *name; 5595 5596 typename = object_class_get_name(oc); 5597 name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU)); 5598 (*s->cpu_fprintf)(s->file, " %s\n", 5599 name); 5600 g_free(name); 5601 } 5602 5603 void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf) 5604 { 5605 CPUListState s = { 5606 .file = f, 5607 .cpu_fprintf = cpu_fprintf, 5608 }; 5609 GSList *list; 5610 5611 list = object_class_get_list(TYPE_ARM_CPU, false); 5612 list = g_slist_sort(list, arm_cpu_list_compare); 5613 (*cpu_fprintf)(f, "Available CPUs:\n"); 5614 g_slist_foreach(list, arm_cpu_list_entry, &s); 5615 g_slist_free(list); 5616 } 5617 5618 static void arm_cpu_add_definition(gpointer data, gpointer user_data) 5619 { 5620 ObjectClass *oc = data; 5621 CpuDefinitionInfoList **cpu_list = user_data; 5622 CpuDefinitionInfoList *entry; 5623 CpuDefinitionInfo *info; 5624 const char *typename; 5625 5626 typename = object_class_get_name(oc); 5627 info = g_malloc0(sizeof(*info)); 5628 info->name = g_strndup(typename, 5629 strlen(typename) - strlen("-" TYPE_ARM_CPU)); 5630 info->q_typename = g_strdup(typename); 5631 5632 entry = g_malloc0(sizeof(*entry)); 5633 entry->value = info; 5634 entry->next = *cpu_list; 5635 *cpu_list = entry; 5636 } 5637 5638 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp) 5639 { 5640 CpuDefinitionInfoList *cpu_list = NULL; 5641 GSList 
*list; 5642 5643 list = object_class_get_list(TYPE_ARM_CPU, false); 5644 g_slist_foreach(list, arm_cpu_add_definition, &cpu_list); 5645 g_slist_free(list); 5646 5647 return cpu_list; 5648 } 5649 5650 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, 5651 void *opaque, int state, int secstate, 5652 int crm, int opc1, int opc2, 5653 const char *name) 5654 { 5655 /* Private utility function for define_one_arm_cp_reg_with_opaque(): 5656 * add a single reginfo struct to the hash table. 5657 */ 5658 uint32_t *key = g_new(uint32_t, 1); 5659 ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo)); 5660 int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0; 5661 int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0; 5662 5663 r2->name = g_strdup(name); 5664 /* Reset the secure state to the specific incoming state. This is 5665 * necessary as the register may have been defined with both states. 5666 */ 5667 r2->secure = secstate; 5668 5669 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { 5670 /* Register is banked (using both entries in array). 5671 * Overwriting fieldoffset as the array is only used to define 5672 * banked registers but later only fieldoffset is used. 5673 */ 5674 r2->fieldoffset = r->bank_fieldoffsets[ns]; 5675 } 5676 5677 if (state == ARM_CP_STATE_AA32) { 5678 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { 5679 /* If the register is banked then we don't need to migrate or 5680 * reset the 32-bit instance in certain cases: 5681 * 5682 * 1) If the register has both 32-bit and 64-bit instances then we 5683 * can count on the 64-bit instance taking care of the 5684 * non-secure bank. 5685 * 2) If ARMv8 is enabled then we can count on a 64-bit version 5686 * taking care of the secure bank. This requires that separate 5687 * 32 and 64-bit definitions are provided. 5688 */ 5689 if ((r->state == ARM_CP_STATE_BOTH && ns) || 5690 (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) { 5691 r2->type |= ARM_CP_ALIAS; 5692 } 5693 } else if ((secstate != r->secure) && !ns) { 5694 /* The register is not banked so we only want to allow migration of 5695 * the non-secure instance. 5696 */ 5697 r2->type |= ARM_CP_ALIAS; 5698 } 5699 5700 if (r->state == ARM_CP_STATE_BOTH) { 5701 /* We assume it is a cp15 register if the .cp field is left unset. 5702 */ 5703 if (r2->cp == 0) { 5704 r2->cp = 15; 5705 } 5706 5707 #ifdef HOST_WORDS_BIGENDIAN 5708 if (r2->fieldoffset) { 5709 r2->fieldoffset += sizeof(uint32_t); 5710 } 5711 #endif 5712 } 5713 } 5714 if (state == ARM_CP_STATE_AA64) { 5715 /* To allow abbreviation of ARMCPRegInfo 5716 * definitions, we treat cp == 0 as equivalent to 5717 * the value for "standard guest-visible sysreg". 5718 * STATE_BOTH definitions are also always "standard 5719 * sysreg" in their AArch64 view (the .cp value may 5720 * be non-zero for the benefit of the AArch32 view). 
5721 */ 5722 if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) { 5723 r2->cp = CP_REG_ARM64_SYSREG_CP; 5724 } 5725 *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm, 5726 r2->opc0, opc1, opc2); 5727 } else { 5728 *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2); 5729 } 5730 if (opaque) { 5731 r2->opaque = opaque; 5732 } 5733 /* reginfo passed to helpers is correct for the actual access, 5734 * and is never ARM_CP_STATE_BOTH: 5735 */ 5736 r2->state = state; 5737 /* Make sure reginfo passed to helpers for wildcarded regs 5738 * has the correct crm/opc1/opc2 for this reg, not CP_ANY: 5739 */ 5740 r2->crm = crm; 5741 r2->opc1 = opc1; 5742 r2->opc2 = opc2; 5743 /* By convention, for wildcarded registers only the first 5744 * entry is used for migration; the others are marked as 5745 * ALIAS so we don't try to transfer the register 5746 * multiple times. Special registers (ie NOP/WFI) are 5747 * never migratable and not even raw-accessible. 5748 */ 5749 if ((r->type & ARM_CP_SPECIAL)) { 5750 r2->type |= ARM_CP_NO_RAW; 5751 } 5752 if (((r->crm == CP_ANY) && crm != 0) || 5753 ((r->opc1 == CP_ANY) && opc1 != 0) || 5754 ((r->opc2 == CP_ANY) && opc2 != 0)) { 5755 r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB; 5756 } 5757 5758 /* Check that raw accesses are either forbidden or handled. Note that 5759 * we can't assert this earlier because the setup of fieldoffset for 5760 * banked registers has to be done first. 5761 */ 5762 if (!(r2->type & ARM_CP_NO_RAW)) { 5763 assert(!raw_accessors_invalid(r2)); 5764 } 5765 5766 /* Overriding of an existing definition must be explicitly 5767 * requested. 5768 */ 5769 if (!(r->type & ARM_CP_OVERRIDE)) { 5770 ARMCPRegInfo *oldreg; 5771 oldreg = g_hash_table_lookup(cpu->cp_regs, key); 5772 if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) { 5773 fprintf(stderr, "Register redefined: cp=%d %d bit " 5774 "crn=%d crm=%d opc1=%d opc2=%d, " 5775 "was %s, now %s\n", r2->cp, 32 + 32 * is64, 5776 r2->crn, r2->crm, r2->opc1, r2->opc2, 5777 oldreg->name, r2->name); 5778 g_assert_not_reached(); 5779 } 5780 } 5781 g_hash_table_insert(cpu->cp_regs, key, r2); 5782 } 5783 5784 5785 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, 5786 const ARMCPRegInfo *r, void *opaque) 5787 { 5788 /* Define implementations of coprocessor registers. 5789 * We store these in a hashtable because typically 5790 * there are less than 150 registers in a space which 5791 * is 16*16*16*8*8 = 262144 in size. 5792 * Wildcarding is supported for the crm, opc1 and opc2 fields. 5793 * If a register is defined twice then the second definition is 5794 * used, so this can be used to define some generic registers and 5795 * then override them with implementation specific variations. 5796 * At least one of the original and the second definition should 5797 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard 5798 * against accidental use. 5799 * 5800 * The state field defines whether the register is to be 5801 * visible in the AArch32 or AArch64 execution state. If the 5802 * state is set to ARM_CP_STATE_BOTH then we synthesise a 5803 * reginfo structure for the AArch32 view, which sees the lower 5804 * 32 bits of the 64 bit register. 5805 * 5806 * Only registers visible in AArch64 may set r->opc0; opc0 cannot 5807 * be wildcarded. AArch64 registers are always considered to be 64 5808 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of 5809 * the register, if any. 5810 */ 5811 int crm, opc1, opc2, state; 5812 int crmmin = (r->crm == CP_ANY) ? 
0 : r->crm; 5813 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm; 5814 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1; 5815 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1; 5816 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2; 5817 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2; 5818 /* 64 bit registers have only CRm and Opc1 fields */ 5819 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn))); 5820 /* op0 only exists in the AArch64 encodings */ 5821 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0)); 5822 /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */ 5823 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT)); 5824 /* The AArch64 pseudocode CheckSystemAccess() specifies that op1 5825 * encodes a minimum access level for the register. We roll this 5826 * runtime check into our general permission check code, so check 5827 * here that the reginfo's specified permissions are strict enough 5828 * to encompass the generic architectural permission check. 5829 */ 5830 if (r->state != ARM_CP_STATE_AA32) { 5831 int mask = 0; 5832 switch (r->opc1) { 5833 case 0: case 1: case 2: 5834 /* min_EL EL1 */ 5835 mask = PL1_RW; 5836 break; 5837 case 3: 5838 /* min_EL EL0 */ 5839 mask = PL0_RW; 5840 break; 5841 case 4: 5842 /* min_EL EL2 */ 5843 mask = PL2_RW; 5844 break; 5845 case 5: 5846 /* unallocated encoding, so not possible */ 5847 assert(false); 5848 break; 5849 case 6: 5850 /* min_EL EL3 */ 5851 mask = PL3_RW; 5852 break; 5853 case 7: 5854 /* min_EL EL1, secure mode only (we don't check the latter) */ 5855 mask = PL1_RW; 5856 break; 5857 default: 5858 /* broken reginfo with out-of-range opc1 */ 5859 assert(false); 5860 break; 5861 } 5862 /* assert our permissions are not too lax (stricter is fine) */ 5863 assert((r->access & ~mask) == 0); 5864 } 5865 5866 /* Check that the register definition has enough info to handle 5867 * reads and writes if they are permitted. 5868 */ 5869 if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) { 5870 if (r->access & PL3_R) { 5871 assert((r->fieldoffset || 5872 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 5873 r->readfn); 5874 } 5875 if (r->access & PL3_W) { 5876 assert((r->fieldoffset || 5877 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 5878 r->writefn); 5879 } 5880 } 5881 /* Bad type field probably means missing sentinel at end of reg list */ 5882 assert(cptype_valid(r->type)); 5883 for (crm = crmmin; crm <= crmmax; crm++) { 5884 for (opc1 = opc1min; opc1 <= opc1max; opc1++) { 5885 for (opc2 = opc2min; opc2 <= opc2max; opc2++) { 5886 for (state = ARM_CP_STATE_AA32; 5887 state <= ARM_CP_STATE_AA64; state++) { 5888 if (r->state != state && r->state != ARM_CP_STATE_BOTH) { 5889 continue; 5890 } 5891 if (state == ARM_CP_STATE_AA32) { 5892 /* Under AArch32 CP registers can be common 5893 * (same for secure and non-secure world) or banked. 
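* For example, a definition that leaves .secure as 0 is entered twice by the code below: once with an "_S" suffix for the secure bank and once under its original name for the non-secure bank.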
5894 */ 5895 char *name; 5896 5897 switch (r->secure) { 5898 case ARM_CP_SECSTATE_S: 5899 case ARM_CP_SECSTATE_NS: 5900 add_cpreg_to_hashtable(cpu, r, opaque, state, 5901 r->secure, crm, opc1, opc2, 5902 r->name); 5903 break; 5904 default: 5905 name = g_strdup_printf("%s_S", r->name); 5906 add_cpreg_to_hashtable(cpu, r, opaque, state, 5907 ARM_CP_SECSTATE_S, 5908 crm, opc1, opc2, name); 5909 g_free(name); 5910 add_cpreg_to_hashtable(cpu, r, opaque, state, 5911 ARM_CP_SECSTATE_NS, 5912 crm, opc1, opc2, r->name); 5913 break; 5914 } 5915 } else { 5916 /* AArch64 registers get mapped to non-secure instance 5917 * of AArch32 */ 5918 add_cpreg_to_hashtable(cpu, r, opaque, state, 5919 ARM_CP_SECSTATE_NS, 5920 crm, opc1, opc2, r->name); 5921 } 5922 } 5923 } 5924 } 5925 } 5926 } 5927 5928 void define_arm_cp_regs_with_opaque(ARMCPU *cpu, 5929 const ARMCPRegInfo *regs, void *opaque) 5930 { 5931 /* Define a whole list of registers */ 5932 const ARMCPRegInfo *r; 5933 for (r = regs; r->type != ARM_CP_SENTINEL; r++) { 5934 define_one_arm_cp_reg_with_opaque(cpu, r, opaque); 5935 } 5936 } 5937 5938 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp) 5939 { 5940 return g_hash_table_lookup(cpregs, &encoded_cp); 5941 } 5942 5943 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri, 5944 uint64_t value) 5945 { 5946 /* Helper coprocessor write function for write-ignore registers */ 5947 } 5948 5949 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri) 5950 { 5951 /* Helper coprocessor read function for read-as-zero registers */ 5952 return 0; 5953 } 5954 5955 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque) 5956 { 5957 /* Helper coprocessor reset function for do-nothing-on-reset registers */ 5958 } 5959 5960 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type) 5961 { 5962 /* Return true if it is not valid for us to switch to 5963 * this CPU mode (ie all the UNPREDICTABLE cases in 5964 * the ARM ARM CPSRWriteByInstr pseudocode). 5965 */ 5966 5967 /* Changes to or from Hyp via MSR and CPS are illegal. */ 5968 if (write_type == CPSRWriteByInstr && 5969 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP || 5970 mode == ARM_CPU_MODE_HYP)) { 5971 return 1; 5972 } 5973 5974 switch (mode) { 5975 case ARM_CPU_MODE_USR: 5976 return 0; 5977 case ARM_CPU_MODE_SYS: 5978 case ARM_CPU_MODE_SVC: 5979 case ARM_CPU_MODE_ABT: 5980 case ARM_CPU_MODE_UND: 5981 case ARM_CPU_MODE_IRQ: 5982 case ARM_CPU_MODE_FIQ: 5983 /* Note that we don't implement the IMPDEF NSACR.RFR which in v7 5984 * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.) 5985 */ 5986 /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR 5987 * and CPS are treated as illegal mode changes.
5988 */ 5989 if (write_type == CPSRWriteByInstr && 5990 (env->cp15.hcr_el2 & HCR_TGE) && 5991 (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON && 5992 !arm_is_secure_below_el3(env)) { 5993 return 1; 5994 } 5995 return 0; 5996 case ARM_CPU_MODE_HYP: 5997 return !arm_feature(env, ARM_FEATURE_EL2) 5998 || arm_current_el(env) < 2 || arm_is_secure(env); 5999 case ARM_CPU_MODE_MON: 6000 return arm_current_el(env) < 3; 6001 default: 6002 return 1; 6003 } 6004 } 6005 6006 uint32_t cpsr_read(CPUARMState *env) 6007 { 6008 int ZF; 6009 ZF = (env->ZF == 0); 6010 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) | 6011 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) 6012 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25) 6013 | ((env->condexec_bits & 0xfc) << 8) 6014 | (env->GE << 16) | (env->daif & CPSR_AIF); 6015 } 6016 6017 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, 6018 CPSRWriteType write_type) 6019 { 6020 uint32_t changed_daif; 6021 6022 if (mask & CPSR_NZCV) { 6023 env->ZF = (~val) & CPSR_Z; 6024 env->NF = val; 6025 env->CF = (val >> 29) & 1; 6026 env->VF = (val << 3) & 0x80000000; 6027 } 6028 if (mask & CPSR_Q) 6029 env->QF = ((val & CPSR_Q) != 0); 6030 if (mask & CPSR_T) 6031 env->thumb = ((val & CPSR_T) != 0); 6032 if (mask & CPSR_IT_0_1) { 6033 env->condexec_bits &= ~3; 6034 env->condexec_bits |= (val >> 25) & 3; 6035 } 6036 if (mask & CPSR_IT_2_7) { 6037 env->condexec_bits &= 3; 6038 env->condexec_bits |= (val >> 8) & 0xfc; 6039 } 6040 if (mask & CPSR_GE) { 6041 env->GE = (val >> 16) & 0xf; 6042 } 6043 6044 /* In a V7 implementation that includes the security extensions but does 6045 * not include Virtualization Extensions the SCR.FW and SCR.AW bits control 6046 * whether non-secure software is allowed to change the CPSR_F and CPSR_A 6047 * bits respectively. 6048 * 6049 * In a V8 implementation, it is permitted for privileged software to 6050 * change the CPSR A/F bits regardless of the SCR.AW/FW bits. 6051 */ 6052 if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) && 6053 arm_feature(env, ARM_FEATURE_EL3) && 6054 !arm_feature(env, ARM_FEATURE_EL2) && 6055 !arm_is_secure(env)) { 6056 6057 changed_daif = (env->daif ^ val) & mask; 6058 6059 if (changed_daif & CPSR_A) { 6060 /* Check to see if we are allowed to change the masking of async 6061 * abort exceptions from a non-secure state. 6062 */ 6063 if (!(env->cp15.scr_el3 & SCR_AW)) { 6064 qemu_log_mask(LOG_GUEST_ERROR, 6065 "Ignoring attempt to switch CPSR_A flag from " 6066 "non-secure world with SCR.AW bit clear\n"); 6067 mask &= ~CPSR_A; 6068 } 6069 } 6070 6071 if (changed_daif & CPSR_F) { 6072 /* Check to see if we are allowed to change the masking of FIQ 6073 * exceptions from a non-secure state. 6074 */ 6075 if (!(env->cp15.scr_el3 & SCR_FW)) { 6076 qemu_log_mask(LOG_GUEST_ERROR, 6077 "Ignoring attempt to switch CPSR_F flag from " 6078 "non-secure world with SCR.FW bit clear\n"); 6079 mask &= ~CPSR_F; 6080 } 6081 6082 /* Check whether non-maskable FIQ (NMFI) support is enabled. 6083 * If this bit is set software is not allowed to mask 6084 * FIQs, but is allowed to set CPSR_F to 0. 
6085 */ 6086 if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) && 6087 (val & CPSR_F)) { 6088 qemu_log_mask(LOG_GUEST_ERROR, 6089 "Ignoring attempt to enable CPSR_F flag " 6090 "(non-maskable FIQ [NMFI] support enabled)\n"); 6091 mask &= ~CPSR_F; 6092 } 6093 } 6094 } 6095 6096 env->daif &= ~(CPSR_AIF & mask); 6097 env->daif |= val & CPSR_AIF & mask; 6098 6099 if (write_type != CPSRWriteRaw && 6100 ((env->uncached_cpsr ^ val) & mask & CPSR_M)) { 6101 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) { 6102 /* Note that we can only get here in USR mode if this is a 6103 * gdb stub write; for this case we follow the architectural 6104 * behaviour for guest writes in USR mode of ignoring an attempt 6105 * to switch mode. (Those are caught by translate.c for writes 6106 * triggered by guest instructions.) 6107 */ 6108 mask &= ~CPSR_M; 6109 } else if (bad_mode_switch(env, val & CPSR_M, write_type)) { 6110 /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in 6111 * v7, and has defined behaviour in v8: 6112 * + leave CPSR.M untouched 6113 * + allow changes to the other CPSR fields 6114 * + set PSTATE.IL 6115 * For user changes via the GDB stub, we don't set PSTATE.IL, 6116 * as this would be unnecessarily harsh for a user error. 6117 */ 6118 mask &= ~CPSR_M; 6119 if (write_type != CPSRWriteByGDBStub && 6120 arm_feature(env, ARM_FEATURE_V8)) { 6121 mask |= CPSR_IL; 6122 val |= CPSR_IL; 6123 } 6124 } else { 6125 switch_mode(env, val & CPSR_M); 6126 } 6127 } 6128 mask &= ~CACHED_CPSR_BITS; 6129 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); 6130 } 6131 6132 /* Sign/zero extend */ 6133 uint32_t HELPER(sxtb16)(uint32_t x) 6134 { 6135 uint32_t res; 6136 res = (uint16_t)(int8_t)x; 6137 res |= (uint32_t)(int8_t)(x >> 16) << 16; 6138 return res; 6139 } 6140 6141 uint32_t HELPER(uxtb16)(uint32_t x) 6142 { 6143 uint32_t res; 6144 res = (uint16_t)(uint8_t)x; 6145 res |= (uint32_t)(uint8_t)(x >> 16) << 16; 6146 return res; 6147 } 6148 6149 int32_t HELPER(sdiv)(int32_t num, int32_t den) 6150 { 6151 if (den == 0) 6152 return 0; 6153 if (num == INT_MIN && den == -1) 6154 return INT_MIN; 6155 return num / den; 6156 } 6157 6158 uint32_t HELPER(udiv)(uint32_t num, uint32_t den) 6159 { 6160 if (den == 0) 6161 return 0; 6162 return num / den; 6163 } 6164 6165 uint32_t HELPER(rbit)(uint32_t x) 6166 { 6167 return revbit32(x); 6168 } 6169 6170 #if defined(CONFIG_USER_ONLY) 6171 6172 /* These should probably raise undefined insn exceptions. */ 6173 void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val) 6174 { 6175 ARMCPU *cpu = arm_env_get_cpu(env); 6176 6177 cpu_abort(CPU(cpu), "v7m_msr %d\n", reg); 6178 } 6179 6180 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg) 6181 { 6182 ARMCPU *cpu = arm_env_get_cpu(env); 6183 6184 cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg); 6185 return 0; 6186 } 6187 6188 void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest) 6189 { 6190 /* translate.c should never generate calls here in user-only mode */ 6191 g_assert_not_reached(); 6192 } 6193 6194 void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest) 6195 { 6196 /* translate.c should never generate calls here in user-only mode */ 6197 g_assert_not_reached(); 6198 } 6199 6200 uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) 6201 { 6202 /* The TT instructions can be used by unprivileged code, but in 6203 * user-only emulation we don't have the MPU. 
6204 * Luckily since we know we are NonSecure unprivileged (and that in
6205 * turn means that the A flag wasn't specified), all the bits in the
6206 * register must be zero:
6207 * IREGION: 0 because IRVALID is 0
6208 * IRVALID: 0 because NS
6209 * S: 0 because NS
6210 * NSRW: 0 because NS
6211 * NSR: 0 because NS
6212 * RW: 0 because unpriv and A flag not set
6213 * R: 0 because unpriv and A flag not set
6214 * SRVALID: 0 because NS
6215 * MRVALID: 0 because unpriv and A flag not set
6216 * SREGION: 0 because SRVALID is 0
6217 * MREGION: 0 because MRVALID is 0
6218 */
6219 return 0;
6220 }
6221
6222 void switch_mode(CPUARMState *env, int mode)
6223 {
6224 ARMCPU *cpu = arm_env_get_cpu(env);
6225
6226 if (mode != ARM_CPU_MODE_USR) {
6227 cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
6228 }
6229 }
6230
6231 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
6232 uint32_t cur_el, bool secure)
6233 {
6234 return 1;
6235 }
6236
6237 void aarch64_sync_64_to_32(CPUARMState *env)
6238 {
6239 g_assert_not_reached();
6240 }
6241
6242 #else
6243
6244 void switch_mode(CPUARMState *env, int mode)
6245 {
6246 int old_mode;
6247 int i;
6248
6249 old_mode = env->uncached_cpsr & CPSR_M;
6250 if (mode == old_mode)
6251 return;
6252
6253 if (old_mode == ARM_CPU_MODE_FIQ) {
6254 memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
6255 memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
6256 } else if (mode == ARM_CPU_MODE_FIQ) {
6257 memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
6258 memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
6259 }
6260
6261 i = bank_number(old_mode);
6262 env->banked_r13[i] = env->regs[13];
6263 env->banked_r14[i] = env->regs[14];
6264 env->banked_spsr[i] = env->spsr;
6265
6266 i = bank_number(mode);
6267 env->regs[13] = env->banked_r13[i];
6268 env->regs[14] = env->banked_r14[i];
6269 env->spsr = env->banked_spsr[i];
6270 }
6271
6272 /* Physical Interrupt Target EL Lookup Table
6273 *
6274 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
6275 *
6276 * The below multi-dimensional table is used for looking up the target
6277 * exception level given numerous condition criteria. Specifically, the
6278 * target EL is based on SCR and HCR routing controls as well as the
6279 * currently executing EL and secure state.
6280 *
6281 * Dimensions:
6282 * target_el_table[2][2][2][2][2][4]
6283 * | | | | | +--- Current EL
6284 * | | | | +------ Non-secure(0)/Secure(1)
6285 * | | | +--------- HCR mask override
6286 * | | +------------ SCR exec state control
6287 * | +--------------- SCR mask override
6288 * +------------------ 32-bit(0)/64-bit(1) EL3
6289 *
6290 * The table values are as such:
6291 * 0-3 = EL0-EL3
6292 * -1 = Cannot occur
6293 *
6294 * The ARM ARM target EL table includes entries indicating that an "exception
6295 * is not taken". The two cases where this is applicable are:
6296 * 1) An exception is taken from EL3 but the SCR does not have the exception
6297 * routed to EL3.
6298 * 2) An exception is taken from EL2 but the HCR does not have the exception
6299 * routed to EL2.
6300 * In these two cases, the below table contains a target of EL1. This value is
6301 * returned as it is expected that the consumer of the table data will check
6302 * for "target EL >= current EL" to ensure the exception is not taken.
6303 * 6304 * SCR HCR 6305 * 64 EA AMO From 6306 * BIT IRQ IMO Non-secure Secure 6307 * EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3 6308 */ 6309 static const int8_t target_el_table[2][2][2][2][2][4] = { 6310 {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, 6311 {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},}, 6312 {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, 6313 {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},}, 6314 {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, 6315 {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},}, 6316 {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, 6317 {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},}, 6318 {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },}, 6319 {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},}, 6320 {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, -1, 1 },}, 6321 {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},}, 6322 {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, 6323 {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},}, 6324 {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, 6325 {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},},}, 6326 }; 6327 6328 /* 6329 * Determine the target EL for physical exceptions 6330 */ 6331 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, 6332 uint32_t cur_el, bool secure) 6333 { 6334 CPUARMState *env = cs->env_ptr; 6335 int rw; 6336 int scr; 6337 int hcr; 6338 int target_el; 6339 /* Is the highest EL AArch64? */ 6340 int is64 = arm_feature(env, ARM_FEATURE_AARCH64); 6341 6342 if (arm_feature(env, ARM_FEATURE_EL3)) { 6343 rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW); 6344 } else { 6345 /* Either EL2 is the highest EL (and so the EL2 register width 6346 * is given by is64); or there is no EL2 or EL3, in which case 6347 * the value of 'rw' does not affect the table lookup anyway. 
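 * (Worked example: with SCR_EL3.IRQ = 0 and HCR_EL2.{IMO,TGE} = 0, a
 * non-secure IRQ taken from EL0 or EL1 looks up an entry of 1, i.e. it
 * targets EL1, whichever value 'rw' has.)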
6348 */ 6349 rw = is64; 6350 } 6351 6352 switch (excp_idx) { 6353 case EXCP_IRQ: 6354 scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ); 6355 hcr = arm_hcr_el2_imo(env); 6356 break; 6357 case EXCP_FIQ: 6358 scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ); 6359 hcr = arm_hcr_el2_fmo(env); 6360 break; 6361 default: 6362 scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA); 6363 hcr = arm_hcr_el2_amo(env); 6364 break; 6365 }; 6366 6367 /* If HCR.TGE is set then HCR is treated as being 1 */ 6368 hcr |= ((env->cp15.hcr_el2 & HCR_TGE) == HCR_TGE); 6369 6370 /* Perform a table-lookup for the target EL given the current state */ 6371 target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el]; 6372 6373 assert(target_el > 0); 6374 6375 return target_el; 6376 } 6377 6378 static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value, 6379 ARMMMUIdx mmu_idx, bool ignfault) 6380 { 6381 CPUState *cs = CPU(cpu); 6382 CPUARMState *env = &cpu->env; 6383 MemTxAttrs attrs = {}; 6384 MemTxResult txres; 6385 target_ulong page_size; 6386 hwaddr physaddr; 6387 int prot; 6388 ARMMMUFaultInfo fi; 6389 bool secure = mmu_idx & ARM_MMU_IDX_M_S; 6390 int exc; 6391 bool exc_secure; 6392 6393 if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr, 6394 &attrs, &prot, &page_size, &fi, NULL)) { 6395 /* MPU/SAU lookup failed */ 6396 if (fi.type == ARMFault_QEMU_SFault) { 6397 qemu_log_mask(CPU_LOG_INT, 6398 "...SecureFault with SFSR.AUVIOL during stacking\n"); 6399 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK; 6400 env->v7m.sfar = addr; 6401 exc = ARMV7M_EXCP_SECURE; 6402 exc_secure = false; 6403 } else { 6404 qemu_log_mask(CPU_LOG_INT, "...MemManageFault with CFSR.MSTKERR\n"); 6405 env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK; 6406 exc = ARMV7M_EXCP_MEM; 6407 exc_secure = secure; 6408 } 6409 goto pend_fault; 6410 } 6411 address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value, 6412 attrs, &txres); 6413 if (txres != MEMTX_OK) { 6414 /* BusFault trying to write the data */ 6415 qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n"); 6416 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK; 6417 exc = ARMV7M_EXCP_BUS; 6418 exc_secure = false; 6419 goto pend_fault; 6420 } 6421 return true; 6422 6423 pend_fault: 6424 /* By pending the exception at this point we are making 6425 * the IMPDEF choice "overridden exceptions pended" (see the 6426 * MergeExcInfo() pseudocode). The other choice would be to not 6427 * pend them now and then make a choice about which to throw away 6428 * later if we have two derived exceptions. 6429 * The only case when we must not pend the exception but instead 6430 * throw it away is if we are doing the push of the callee registers 6431 * and we've already generated a derived exception. Even in this 6432 * case we will still update the fault status registers. 
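 * (The 'ignfault' argument below implements that choice: when it is set the
 * fault status registers have already been updated above and we simply skip
 * pending the derived exception.)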
6433 */ 6434 if (!ignfault) { 6435 armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure); 6436 } 6437 return false; 6438 } 6439 6440 static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr, 6441 ARMMMUIdx mmu_idx) 6442 { 6443 CPUState *cs = CPU(cpu); 6444 CPUARMState *env = &cpu->env; 6445 MemTxAttrs attrs = {}; 6446 MemTxResult txres; 6447 target_ulong page_size; 6448 hwaddr physaddr; 6449 int prot; 6450 ARMMMUFaultInfo fi; 6451 bool secure = mmu_idx & ARM_MMU_IDX_M_S; 6452 int exc; 6453 bool exc_secure; 6454 uint32_t value; 6455 6456 if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr, 6457 &attrs, &prot, &page_size, &fi, NULL)) { 6458 /* MPU/SAU lookup failed */ 6459 if (fi.type == ARMFault_QEMU_SFault) { 6460 qemu_log_mask(CPU_LOG_INT, 6461 "...SecureFault with SFSR.AUVIOL during unstack\n"); 6462 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK; 6463 env->v7m.sfar = addr; 6464 exc = ARMV7M_EXCP_SECURE; 6465 exc_secure = false; 6466 } else { 6467 qemu_log_mask(CPU_LOG_INT, 6468 "...MemManageFault with CFSR.MUNSTKERR\n"); 6469 env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK; 6470 exc = ARMV7M_EXCP_MEM; 6471 exc_secure = secure; 6472 } 6473 goto pend_fault; 6474 } 6475 6476 value = address_space_ldl(arm_addressspace(cs, attrs), physaddr, 6477 attrs, &txres); 6478 if (txres != MEMTX_OK) { 6479 /* BusFault trying to read the data */ 6480 qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n"); 6481 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK; 6482 exc = ARMV7M_EXCP_BUS; 6483 exc_secure = false; 6484 goto pend_fault; 6485 } 6486 6487 *dest = value; 6488 return true; 6489 6490 pend_fault: 6491 /* By pending the exception at this point we are making 6492 * the IMPDEF choice "overridden exceptions pended" (see the 6493 * MergeExcInfo() pseudocode). The other choice would be to not 6494 * pend them now and then make a choice about which to throw away 6495 * later if we have two derived exceptions. 6496 */ 6497 armv7m_nvic_set_pending(env->nvic, exc, exc_secure); 6498 return false; 6499 } 6500 6501 /* Return true if we're using the process stack pointer (not the MSP) */ 6502 static bool v7m_using_psp(CPUARMState *env) 6503 { 6504 /* Handler mode always uses the main stack; for thread mode 6505 * the CONTROL.SPSEL bit determines the answer. 6506 * Note that in v7M it is not possible to be in Handler mode with 6507 * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both. 6508 */ 6509 return !arm_v7m_is_handler_mode(env) && 6510 env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK; 6511 } 6512 6513 /* Write to v7M CONTROL.SPSEL bit for the specified security bank. 6514 * This may change the current stack pointer between Main and Process 6515 * stack pointers if it is done for the CONTROL register for the current 6516 * security state. 6517 */ 6518 static void write_v7m_control_spsel_for_secstate(CPUARMState *env, 6519 bool new_spsel, 6520 bool secstate) 6521 { 6522 bool old_is_psp = v7m_using_psp(env); 6523 6524 env->v7m.control[secstate] = 6525 deposit32(env->v7m.control[secstate], 6526 R_V7M_CONTROL_SPSEL_SHIFT, 6527 R_V7M_CONTROL_SPSEL_LENGTH, new_spsel); 6528 6529 if (secstate == env->v7m.secure) { 6530 bool new_is_psp = v7m_using_psp(env); 6531 uint32_t tmp; 6532 6533 if (old_is_psp != new_is_psp) { 6534 tmp = env->v7m.other_sp; 6535 env->v7m.other_sp = env->regs[13]; 6536 env->regs[13] = tmp; 6537 } 6538 } 6539 } 6540 6541 /* Write to v7M CONTROL.SPSEL bit. 
This may change the current 6542 * stack pointer between Main and Process stack pointers. 6543 */ 6544 static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel) 6545 { 6546 write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure); 6547 } 6548 6549 void write_v7m_exception(CPUARMState *env, uint32_t new_exc) 6550 { 6551 /* Write a new value to v7m.exception, thus transitioning into or out 6552 * of Handler mode; this may result in a change of active stack pointer. 6553 */ 6554 bool new_is_psp, old_is_psp = v7m_using_psp(env); 6555 uint32_t tmp; 6556 6557 env->v7m.exception = new_exc; 6558 6559 new_is_psp = v7m_using_psp(env); 6560 6561 if (old_is_psp != new_is_psp) { 6562 tmp = env->v7m.other_sp; 6563 env->v7m.other_sp = env->regs[13]; 6564 env->regs[13] = tmp; 6565 } 6566 } 6567 6568 /* Switch M profile security state between NS and S */ 6569 static void switch_v7m_security_state(CPUARMState *env, bool new_secstate) 6570 { 6571 uint32_t new_ss_msp, new_ss_psp; 6572 6573 if (env->v7m.secure == new_secstate) { 6574 return; 6575 } 6576 6577 /* All the banked state is accessed by looking at env->v7m.secure 6578 * except for the stack pointer; rearrange the SP appropriately. 6579 */ 6580 new_ss_msp = env->v7m.other_ss_msp; 6581 new_ss_psp = env->v7m.other_ss_psp; 6582 6583 if (v7m_using_psp(env)) { 6584 env->v7m.other_ss_psp = env->regs[13]; 6585 env->v7m.other_ss_msp = env->v7m.other_sp; 6586 } else { 6587 env->v7m.other_ss_msp = env->regs[13]; 6588 env->v7m.other_ss_psp = env->v7m.other_sp; 6589 } 6590 6591 env->v7m.secure = new_secstate; 6592 6593 if (v7m_using_psp(env)) { 6594 env->regs[13] = new_ss_psp; 6595 env->v7m.other_sp = new_ss_msp; 6596 } else { 6597 env->regs[13] = new_ss_msp; 6598 env->v7m.other_sp = new_ss_psp; 6599 } 6600 } 6601 6602 void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest) 6603 { 6604 /* Handle v7M BXNS: 6605 * - if the return value is a magic value, do exception return (like BX) 6606 * - otherwise bit 0 of the return value is the target security state 6607 */ 6608 uint32_t min_magic; 6609 6610 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 6611 /* Covers FNC_RETURN and EXC_RETURN magic */ 6612 min_magic = FNC_RETURN_MIN_MAGIC; 6613 } else { 6614 /* EXC_RETURN magic only */ 6615 min_magic = EXC_RETURN_MIN_MAGIC; 6616 } 6617 6618 if (dest >= min_magic) { 6619 /* This is an exception return magic value; put it where 6620 * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT. 6621 * Note that if we ever add gen_ss_advance() singlestep support to 6622 * M profile this should count as an "instruction execution complete" 6623 * event (compare gen_bx_excret_final_code()). 
6624 */ 6625 env->regs[15] = dest & ~1; 6626 env->thumb = dest & 1; 6627 HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT); 6628 /* notreached */ 6629 } 6630 6631 /* translate.c should have made BXNS UNDEF unless we're secure */ 6632 assert(env->v7m.secure); 6633 6634 switch_v7m_security_state(env, dest & 1); 6635 env->thumb = 1; 6636 env->regs[15] = dest & ~1; 6637 } 6638 6639 void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest) 6640 { 6641 /* Handle v7M BLXNS: 6642 * - bit 0 of the destination address is the target security state 6643 */ 6644 6645 /* At this point regs[15] is the address just after the BLXNS */ 6646 uint32_t nextinst = env->regs[15] | 1; 6647 uint32_t sp = env->regs[13] - 8; 6648 uint32_t saved_psr; 6649 6650 /* translate.c will have made BLXNS UNDEF unless we're secure */ 6651 assert(env->v7m.secure); 6652 6653 if (dest & 1) { 6654 /* target is Secure, so this is just a normal BLX, 6655 * except that the low bit doesn't indicate Thumb/not. 6656 */ 6657 env->regs[14] = nextinst; 6658 env->thumb = 1; 6659 env->regs[15] = dest & ~1; 6660 return; 6661 } 6662 6663 /* Target is non-secure: first push a stack frame */ 6664 if (!QEMU_IS_ALIGNED(sp, 8)) { 6665 qemu_log_mask(LOG_GUEST_ERROR, 6666 "BLXNS with misaligned SP is UNPREDICTABLE\n"); 6667 } 6668 6669 saved_psr = env->v7m.exception; 6670 if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) { 6671 saved_psr |= XPSR_SFPA; 6672 } 6673 6674 /* Note that these stores can throw exceptions on MPU faults */ 6675 cpu_stl_data(env, sp, nextinst); 6676 cpu_stl_data(env, sp + 4, saved_psr); 6677 6678 env->regs[13] = sp; 6679 env->regs[14] = 0xfeffffff; 6680 if (arm_v7m_is_handler_mode(env)) { 6681 /* Write a dummy value to IPSR, to avoid leaking the current secure 6682 * exception number to non-secure code. This is guaranteed not 6683 * to cause write_v7m_exception() to actually change stacks. 6684 */ 6685 write_v7m_exception(env, 1); 6686 } 6687 switch_v7m_security_state(env, 0); 6688 env->thumb = 1; 6689 env->regs[15] = dest; 6690 } 6691 6692 static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode, 6693 bool spsel) 6694 { 6695 /* Return a pointer to the location where we currently store the 6696 * stack pointer for the requested security state and thread mode. 6697 * This pointer will become invalid if the CPU state is updated 6698 * such that the stack pointers are switched around (eg changing 6699 * the SPSEL control bit). 6700 * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode(). 6701 * Unlike that pseudocode, we require the caller to pass us in the 6702 * SPSEL control bit value; this is because we also use this 6703 * function in handling of pushing of the callee-saves registers 6704 * part of the v8M stack frame (pseudocode PushCalleeStack()), 6705 * and in the tailchain codepath the SPSEL bit comes from the exception 6706 * return magic LR value from the previous exception. The pseudocode 6707 * opencodes the stack-selection in PushCalleeStack(), but we prefer 6708 * to make this utility function generic enough to do the job. 
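 * In short: the wanted SP is the PSP only for Thread mode with SPSEL set,
 * otherwise it is the MSP. If the requested security state is the current
 * one, the answer is either the live SP in regs[13] or the inactive one
 * parked in v7m.other_sp; for the opposite security state both candidates
 * are banked in v7m.other_ss_msp and v7m.other_ss_psp.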
6709 */ 6710 bool want_psp = threadmode && spsel; 6711 6712 if (secure == env->v7m.secure) { 6713 if (want_psp == v7m_using_psp(env)) { 6714 return &env->regs[13]; 6715 } else { 6716 return &env->v7m.other_sp; 6717 } 6718 } else { 6719 if (want_psp) { 6720 return &env->v7m.other_ss_psp; 6721 } else { 6722 return &env->v7m.other_ss_msp; 6723 } 6724 } 6725 } 6726 6727 static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure, 6728 uint32_t *pvec) 6729 { 6730 CPUState *cs = CPU(cpu); 6731 CPUARMState *env = &cpu->env; 6732 MemTxResult result; 6733 uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4; 6734 uint32_t vector_entry; 6735 MemTxAttrs attrs = {}; 6736 ARMMMUIdx mmu_idx; 6737 bool exc_secure; 6738 6739 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true); 6740 6741 /* We don't do a get_phys_addr() here because the rules for vector 6742 * loads are special: they always use the default memory map, and 6743 * the default memory map permits reads from all addresses. 6744 * Since there's no easy way to pass through to pmsav8_mpu_lookup() 6745 * that we want this special case which would always say "yes", 6746 * we just do the SAU lookup here followed by a direct physical load. 6747 */ 6748 attrs.secure = targets_secure; 6749 attrs.user = false; 6750 6751 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 6752 V8M_SAttributes sattrs = {}; 6753 6754 v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs); 6755 if (sattrs.ns) { 6756 attrs.secure = false; 6757 } else if (!targets_secure) { 6758 /* NS access to S memory */ 6759 goto load_fail; 6760 } 6761 } 6762 6763 vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr, 6764 attrs, &result); 6765 if (result != MEMTX_OK) { 6766 goto load_fail; 6767 } 6768 *pvec = vector_entry; 6769 return true; 6770 6771 load_fail: 6772 /* All vector table fetch fails are reported as HardFault, with 6773 * HFSR.VECTTBL and .FORCED set. (FORCED is set because 6774 * technically the underlying exception is a MemManage or BusFault 6775 * that is escalated to HardFault.) This is a terminal exception, 6776 * so we will either take the HardFault immediately or else enter 6777 * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()). 6778 */ 6779 exc_secure = targets_secure || 6780 !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK); 6781 env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK; 6782 armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure); 6783 return false; 6784 } 6785 6786 static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain, 6787 bool ignore_faults) 6788 { 6789 /* For v8M, push the callee-saves register part of the stack frame. 6790 * Compare the v8M pseudocode PushCalleeStack(). 6791 * In the tailchaining case this may not be the current stack. 
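 * As a sketch, the frame written below is laid out like this (offsets from
 * the final SP, which is the incoming SP minus 0x28):
 *   0x00: integrity signature 0xfefa125b   0x04: (reserved, not written here)
 *   0x08: r4    0x0c: r5    0x10: r6    0x14: r7
 *   0x18: r8    0x1c: r9    0x20: r10   0x24: r11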
6792 */ 6793 CPUARMState *env = &cpu->env; 6794 uint32_t *frame_sp_p; 6795 uint32_t frameptr; 6796 ARMMMUIdx mmu_idx; 6797 bool stacked_ok; 6798 6799 if (dotailchain) { 6800 bool mode = lr & R_V7M_EXCRET_MODE_MASK; 6801 bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) || 6802 !mode; 6803 6804 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv); 6805 frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode, 6806 lr & R_V7M_EXCRET_SPSEL_MASK); 6807 } else { 6808 mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false)); 6809 frame_sp_p = &env->regs[13]; 6810 } 6811 6812 frameptr = *frame_sp_p - 0x28; 6813 6814 /* Write as much of the stack frame as we can. A write failure may 6815 * cause us to pend a derived exception. 6816 */ 6817 stacked_ok = 6818 v7m_stack_write(cpu, frameptr, 0xfefa125b, mmu_idx, ignore_faults) && 6819 v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, 6820 ignore_faults) && 6821 v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, 6822 ignore_faults) && 6823 v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, 6824 ignore_faults) && 6825 v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, 6826 ignore_faults) && 6827 v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, 6828 ignore_faults) && 6829 v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, 6830 ignore_faults) && 6831 v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, 6832 ignore_faults) && 6833 v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, 6834 ignore_faults); 6835 6836 /* Update SP regardless of whether any of the stack accesses failed. 6837 * When we implement v8M stack limit checking then this attempt to 6838 * update SP might also fail and result in a derived exception. 6839 */ 6840 *frame_sp_p = frameptr; 6841 6842 return !stacked_ok; 6843 } 6844 6845 static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain, 6846 bool ignore_stackfaults) 6847 { 6848 /* Do the "take the exception" parts of exception entry, 6849 * but not the pushing of state to the stack. This is 6850 * similar to the pseudocode ExceptionTaken() function. 6851 */ 6852 CPUARMState *env = &cpu->env; 6853 uint32_t addr; 6854 bool targets_secure; 6855 int exc; 6856 bool push_failed = false; 6857 6858 armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure); 6859 qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n", 6860 targets_secure ? "secure" : "nonsecure", exc); 6861 6862 if (arm_feature(env, ARM_FEATURE_V8)) { 6863 if (arm_feature(env, ARM_FEATURE_M_SECURITY) && 6864 (lr & R_V7M_EXCRET_S_MASK)) { 6865 /* The background code (the owner of the registers in the 6866 * exception frame) is Secure. This means it may either already 6867 * have or now needs to push callee-saves registers. 6868 */ 6869 if (targets_secure) { 6870 if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) { 6871 /* We took an exception from Secure to NonSecure 6872 * (which means the callee-saved registers got stacked) 6873 * and are now tailchaining to a Secure exception. 6874 * Clear DCRS so eventual return from this Secure 6875 * exception unstacks the callee-saved registers. 6876 */ 6877 lr &= ~R_V7M_EXCRET_DCRS_MASK; 6878 } 6879 } else { 6880 /* We're going to a non-secure exception; push the 6881 * callee-saves registers to the stack now, if they're 6882 * not already saved. 
6883 */ 6884 if (lr & R_V7M_EXCRET_DCRS_MASK && 6885 !(dotailchain && (lr & R_V7M_EXCRET_ES_MASK))) { 6886 push_failed = v7m_push_callee_stack(cpu, lr, dotailchain, 6887 ignore_stackfaults); 6888 } 6889 lr |= R_V7M_EXCRET_DCRS_MASK; 6890 } 6891 } 6892 6893 lr &= ~R_V7M_EXCRET_ES_MASK; 6894 if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) { 6895 lr |= R_V7M_EXCRET_ES_MASK; 6896 } 6897 lr &= ~R_V7M_EXCRET_SPSEL_MASK; 6898 if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) { 6899 lr |= R_V7M_EXCRET_SPSEL_MASK; 6900 } 6901 6902 /* Clear registers if necessary to prevent non-secure exception 6903 * code being able to see register values from secure code. 6904 * Where register values become architecturally UNKNOWN we leave 6905 * them with their previous values. 6906 */ 6907 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 6908 if (!targets_secure) { 6909 /* Always clear the caller-saved registers (they have been 6910 * pushed to the stack earlier in v7m_push_stack()). 6911 * Clear callee-saved registers if the background code is 6912 * Secure (in which case these regs were saved in 6913 * v7m_push_callee_stack()). 6914 */ 6915 int i; 6916 6917 for (i = 0; i < 13; i++) { 6918 /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */ 6919 if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) { 6920 env->regs[i] = 0; 6921 } 6922 } 6923 /* Clear EAPSR */ 6924 xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT); 6925 } 6926 } 6927 } 6928 6929 if (push_failed && !ignore_stackfaults) { 6930 /* Derived exception on callee-saves register stacking: 6931 * we might now want to take a different exception which 6932 * targets a different security state, so try again from the top. 6933 */ 6934 qemu_log_mask(CPU_LOG_INT, 6935 "...derived exception on callee-saves register stacking"); 6936 v7m_exception_taken(cpu, lr, true, true); 6937 return; 6938 } 6939 6940 if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) { 6941 /* Vector load failed: derived exception */ 6942 qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load"); 6943 v7m_exception_taken(cpu, lr, true, true); 6944 return; 6945 } 6946 6947 /* Now we've done everything that might cause a derived exception 6948 * we can go ahead and activate whichever exception we're going to 6949 * take (which might now be the derived exception). 6950 */ 6951 armv7m_nvic_acknowledge_irq(env->nvic); 6952 6953 /* Switch to target security state -- must do this before writing SPSEL */ 6954 switch_v7m_security_state(env, targets_secure); 6955 write_v7m_control_spsel(env, 0); 6956 arm_clear_exclusive(env); 6957 /* Clear IT bits */ 6958 env->condexec_bits = 0; 6959 env->regs[14] = lr; 6960 env->regs[15] = addr & 0xfffffffe; 6961 env->thumb = addr & 1; 6962 } 6963 6964 static bool v7m_push_stack(ARMCPU *cpu) 6965 { 6966 /* Do the "set up stack frame" part of exception entry, 6967 * similar to pseudocode PushStack(). 6968 * Return true if we generate a derived exception (and so 6969 * should ignore further stack faults trying to process 6970 * that derived exception.) 
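 * The 0x20-byte basic frame written below is laid out like this (offsets
 * from the final SP):
 *   0x00: r0    0x04: r1    0x08: r2              0x0c: r3
 *   0x10: r12   0x14: lr    0x18: return address  0x1c: xPSR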
6971 */
6972 bool stacked_ok;
6973 CPUARMState *env = &cpu->env;
6974 uint32_t xpsr = xpsr_read(env);
6975 uint32_t frameptr = env->regs[13];
6976 ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
6977
6978 /* Align stack pointer if the guest wants that */
6979 if ((frameptr & 4) &&
6980 (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
6981 frameptr -= 4;
6982 xpsr |= XPSR_SPREALIGN;
6983 }
6984
6985 frameptr -= 0x20;
6986
6987 /* Write as much of the stack frame as we can. If we fail a stack
6988 * write this will result in a derived exception being pended
6989 * (which may be taken in preference to the one we started with
6990 * if it has higher priority).
6991 */
6992 stacked_ok =
6993 v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, false) &&
6994 v7m_stack_write(cpu, frameptr + 4, env->regs[1], mmu_idx, false) &&
6995 v7m_stack_write(cpu, frameptr + 8, env->regs[2], mmu_idx, false) &&
6996 v7m_stack_write(cpu, frameptr + 12, env->regs[3], mmu_idx, false) &&
6997 v7m_stack_write(cpu, frameptr + 16, env->regs[12], mmu_idx, false) &&
6998 v7m_stack_write(cpu, frameptr + 20, env->regs[14], mmu_idx, false) &&
6999 v7m_stack_write(cpu, frameptr + 24, env->regs[15], mmu_idx, false) &&
7000 v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, false);
7001
7002 /* Update SP regardless of whether any of the stack accesses failed.
7003 * When we implement v8M stack limit checking then this attempt to
7004 * update SP might also fail and result in a derived exception.
7005 */
7006 env->regs[13] = frameptr;
7007
7008 return !stacked_ok;
7009 }
7010
7011 static void do_v7m_exception_exit(ARMCPU *cpu)
7012 {
7013 CPUARMState *env = &cpu->env;
7014 uint32_t excret;
7015 uint32_t xpsr;
7016 bool ufault = false;
7017 bool sfault = false;
7018 bool return_to_sp_process;
7019 bool return_to_handler;
7020 bool rettobase = false;
7021 bool exc_secure = false;
7022 bool return_to_secure;
7023
7024 /* If we're not in Handler mode then jumps to magic exception-exit
7025 * addresses don't have magic behaviour. However for the v8M
7026 * security extensions the magic secure-function-return has to
7027 * work in thread mode too, so to avoid doing an extra check in
7028 * the generated code we allow exception-exit magic to also cause the
7029 * internal exception and bring us here in thread mode. Correct code
7030 * will never try to do this (the following insn fetch will always
7031 * fault) so the overhead of having taken an unnecessary exception
7032 * doesn't matter.
7033 */
7034 if (!arm_v7m_is_handler_mode(env)) {
7035 return;
7036 }
7037
7038 /* In the spec pseudocode ExceptionReturn() is called directly
7039 * from BXWritePC() and gets the full target PC value including
7040 * bit zero. In QEMU's implementation we treat it as a normal
7041 * jump-to-register (which is then caught later on), and so split
7042 * the target value up between env->regs[15] and env->thumb in
7043 * gen_bx(). Reconstitute it.
7044 */
7045 excret = env->regs[15];
7046 if (env->thumb) {
7047 excret |= 1;
7048 }
7049
7050 qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
7051 " previous exception %d\n",
7052 excret, env->v7m.exception);
7053
7054 if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
7055 qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
7056 "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
7057 excret);
7058 }
7059
7060 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
7061 /* EXC_RETURN.ES validation check (R_SMFL).
We must do this before 7062 * we pick which FAULTMASK to clear. 7063 */ 7064 if (!env->v7m.secure && 7065 ((excret & R_V7M_EXCRET_ES_MASK) || 7066 !(excret & R_V7M_EXCRET_DCRS_MASK))) { 7067 sfault = 1; 7068 /* For all other purposes, treat ES as 0 (R_HXSR) */ 7069 excret &= ~R_V7M_EXCRET_ES_MASK; 7070 } 7071 exc_secure = excret & R_V7M_EXCRET_ES_MASK; 7072 } 7073 7074 if (env->v7m.exception != ARMV7M_EXCP_NMI) { 7075 /* Auto-clear FAULTMASK on return from other than NMI. 7076 * If the security extension is implemented then this only 7077 * happens if the raw execution priority is >= 0; the 7078 * value of the ES bit in the exception return value indicates 7079 * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.) 7080 */ 7081 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 7082 if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) { 7083 env->v7m.faultmask[exc_secure] = 0; 7084 } 7085 } else { 7086 env->v7m.faultmask[M_REG_NS] = 0; 7087 } 7088 } 7089 7090 switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception, 7091 exc_secure)) { 7092 case -1: 7093 /* attempt to exit an exception that isn't active */ 7094 ufault = true; 7095 break; 7096 case 0: 7097 /* still an irq active now */ 7098 break; 7099 case 1: 7100 /* we returned to base exception level, no nesting. 7101 * (In the pseudocode this is written using "NestedActivation != 1" 7102 * where we have 'rettobase == false'.) 7103 */ 7104 rettobase = true; 7105 break; 7106 default: 7107 g_assert_not_reached(); 7108 } 7109 7110 return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK); 7111 return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK; 7112 return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) && 7113 (excret & R_V7M_EXCRET_S_MASK); 7114 7115 if (arm_feature(env, ARM_FEATURE_V8)) { 7116 if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) { 7117 /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP); 7118 * we choose to take the UsageFault. 7119 */ 7120 if ((excret & R_V7M_EXCRET_S_MASK) || 7121 (excret & R_V7M_EXCRET_ES_MASK) || 7122 !(excret & R_V7M_EXCRET_DCRS_MASK)) { 7123 ufault = true; 7124 } 7125 } 7126 if (excret & R_V7M_EXCRET_RES0_MASK) { 7127 ufault = true; 7128 } 7129 } else { 7130 /* For v7M we only recognize certain combinations of the low bits */ 7131 switch (excret & 0xf) { 7132 case 1: /* Return to Handler */ 7133 break; 7134 case 13: /* Return to Thread using Process stack */ 7135 case 9: /* Return to Thread using Main stack */ 7136 /* We only need to check NONBASETHRDENA for v7M, because in 7137 * v8M this bit does not exist (it is RES1). 7138 */ 7139 if (!rettobase && 7140 !(env->v7m.ccr[env->v7m.secure] & 7141 R_V7M_CCR_NONBASETHRDENA_MASK)) { 7142 ufault = true; 7143 } 7144 break; 7145 default: 7146 ufault = true; 7147 } 7148 } 7149 7150 /* 7151 * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in 7152 * Handler mode (and will be until we write the new XPSR.Interrupt 7153 * field) this does not switch around the current stack pointer. 7154 * We must do this before we do any kind of tailchaining, including 7155 * for the derived exceptions on integrity check failures, or we will 7156 * give the guest an incorrect EXCRET.SPSEL value on exception entry. 
7157 */ 7158 write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure); 7159 7160 if (sfault) { 7161 env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK; 7162 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 7163 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing " 7164 "stackframe: failed EXC_RETURN.ES validity check\n"); 7165 v7m_exception_taken(cpu, excret, true, false); 7166 return; 7167 } 7168 7169 if (ufault) { 7170 /* Bad exception return: instead of popping the exception 7171 * stack, directly take a usage fault on the current stack. 7172 */ 7173 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; 7174 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); 7175 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing " 7176 "stackframe: failed exception return integrity check\n"); 7177 v7m_exception_taken(cpu, excret, true, false); 7178 return; 7179 } 7180 7181 /* 7182 * Tailchaining: if there is currently a pending exception that 7183 * is high enough priority to preempt execution at the level we're 7184 * about to return to, then just directly take that exception now, 7185 * avoiding an unstack-and-then-stack. Note that now we have 7186 * deactivated the previous exception by calling armv7m_nvic_complete_irq() 7187 * our current execution priority is already the execution priority we are 7188 * returning to -- none of the state we would unstack or set based on 7189 * the EXCRET value affects it. 7190 */ 7191 if (armv7m_nvic_can_take_pending_exception(env->nvic)) { 7192 qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n"); 7193 v7m_exception_taken(cpu, excret, true, false); 7194 return; 7195 } 7196 7197 switch_v7m_security_state(env, return_to_secure); 7198 7199 { 7200 /* The stack pointer we should be reading the exception frame from 7201 * depends on bits in the magic exception return type value (and 7202 * for v8M isn't necessarily the stack pointer we will eventually 7203 * end up resuming execution with). Get a pointer to the location 7204 * in the CPU state struct where the SP we need is currently being 7205 * stored; we will use and modify it in place. 7206 * We use this limited C variable scope so we don't accidentally 7207 * use 'frame_sp_p' after we do something that makes it invalid. 7208 */ 7209 uint32_t *frame_sp_p = get_v7m_sp_ptr(env, 7210 return_to_secure, 7211 !return_to_handler, 7212 return_to_sp_process); 7213 uint32_t frameptr = *frame_sp_p; 7214 bool pop_ok = true; 7215 ARMMMUIdx mmu_idx; 7216 bool return_to_priv = return_to_handler || 7217 !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK); 7218 7219 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure, 7220 return_to_priv); 7221 7222 if (!QEMU_IS_ALIGNED(frameptr, 8) && 7223 arm_feature(env, ARM_FEATURE_V8)) { 7224 qemu_log_mask(LOG_GUEST_ERROR, 7225 "M profile exception return with non-8-aligned SP " 7226 "for destination state is UNPREDICTABLE\n"); 7227 } 7228 7229 /* Do we need to pop callee-saved registers? 
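 * We do if we are returning to the Secure state and the EXC_RETURN value
 * indicates that the callee-saves part of the frame is present (its ES bit
 * or its DCRS bit is clear); that part of the frame starts with the
 * integrity signature which is checked first.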
*/
7230 if (return_to_secure &&
7231 ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
7232 (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
7233 uint32_t expected_sig = 0xfefa125b;
7234 uint32_t actual_sig;
7235
7236 pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
7237
7238 if (pop_ok && expected_sig != actual_sig) {
7239 /* Take a SecureFault on the current stack */
7240 env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
7241 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
7242 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
7243 "stackframe: failed exception return integrity "
7244 "signature check\n");
7245 v7m_exception_taken(cpu, excret, true, false);
7246 return;
7247 }
7248
7249 pop_ok = pop_ok &&
7250 v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
7252 v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
7253 v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
7254 v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
7255 v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
7256 v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
7257 v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
7258 v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
7259
7260 frameptr += 0x28;
7261 }
7262
7263 /* Pop registers */
7264 pop_ok = pop_ok &&
7265 v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
7266 v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
7267 v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
7268 v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
7269 v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
7270 v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
7271 v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
7272 v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
7273
7274 if (!pop_ok) {
7275 /* v7m_stack_read() pended a fault, so take it (as a tail
7276 * chained exception on the same stack frame)
7277 */
7278 qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
7279 v7m_exception_taken(cpu, excret, true, false);
7280 return;
7281 }
7282
7283 /* Returning from an exception with a PC with bit 0 set is defined
7284 * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
7285 * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
7286 * the lsbit, and there are several RTOSes out there which incorrectly
7287 * assume the r15 in the stack frame should be a Thumb-style "lsbit
7288 * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
7289 * complain about the badly behaved guest.
7290 */
7291 if (env->regs[15] & 1) {
7292 env->regs[15] &= ~1U;
7293 if (!arm_feature(env, ARM_FEATURE_V8)) {
7294 qemu_log_mask(LOG_GUEST_ERROR,
7295 "M profile return from interrupt with misaligned "
7296 "PC is UNPREDICTABLE on v7M\n");
7297 }
7298 }
7299
7300 if (arm_feature(env, ARM_FEATURE_V8)) {
7301 /* For v8M we have to check whether the xPSR exception field
7302 * matches the EXCRET value for return to handler/thread
7303 * before we commit to changing the SP and xPSR.
7304 */
7305 bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
7306 if (return_to_handler != will_be_handler) {
7307 /* Take an INVPC UsageFault on the current stack.
7308 * By this point we will have switched to the security state 7309 * for the background state, so this UsageFault will target 7310 * that state. 7311 */ 7312 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, 7313 env->v7m.secure); 7314 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; 7315 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing " 7316 "stackframe: failed exception return integrity " 7317 "check\n"); 7318 v7m_exception_taken(cpu, excret, true, false); 7319 return; 7320 } 7321 } 7322 7323 /* Commit to consuming the stack frame */ 7324 frameptr += 0x20; 7325 /* Undo stack alignment (the SPREALIGN bit indicates that the original 7326 * pre-exception SP was not 8-aligned and we added a padding word to 7327 * align it, so we undo this by ORing in the bit that increases it 7328 * from the current 8-aligned value to the 8-unaligned value. (Adding 4 7329 * would work too but a logical OR is how the pseudocode specifies it.) 7330 */ 7331 if (xpsr & XPSR_SPREALIGN) { 7332 frameptr |= 4; 7333 } 7334 *frame_sp_p = frameptr; 7335 } 7336 /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */ 7337 xpsr_write(env, xpsr, ~XPSR_SPREALIGN); 7338 7339 /* The restored xPSR exception field will be zero if we're 7340 * resuming in Thread mode. If that doesn't match what the 7341 * exception return excret specified then this is a UsageFault. 7342 * v7M requires we make this check here; v8M did it earlier. 7343 */ 7344 if (return_to_handler != arm_v7m_is_handler_mode(env)) { 7345 /* Take an INVPC UsageFault by pushing the stack again; 7346 * we know we're v7M so this is never a Secure UsageFault. 7347 */ 7348 bool ignore_stackfaults; 7349 7350 assert(!arm_feature(env, ARM_FEATURE_V8)); 7351 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false); 7352 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; 7353 ignore_stackfaults = v7m_push_stack(cpu); 7354 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: " 7355 "failed exception return integrity check\n"); 7356 v7m_exception_taken(cpu, excret, false, ignore_stackfaults); 7357 return; 7358 } 7359 7360 /* Otherwise, we have a successful exception exit. */ 7361 arm_clear_exclusive(env); 7362 qemu_log_mask(CPU_LOG_INT, "...successful exception return\n"); 7363 } 7364 7365 static bool do_v7m_function_return(ARMCPU *cpu) 7366 { 7367 /* v8M security extensions magic function return. 7368 * We may either: 7369 * (1) throw an exception (longjump) 7370 * (2) return true if we successfully handled the function return 7371 * (3) return false if we failed a consistency check and have 7372 * pended a UsageFault that needs to be taken now 7373 * 7374 * At this point the magic return value is split between env->regs[15] 7375 * and env->thumb. We don't bother to reconstitute it because we don't 7376 * need it (all values are handled the same way). 7377 */ 7378 CPUARMState *env = &cpu->env; 7379 uint32_t newpc, newpsr, newpsr_exc; 7380 7381 qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n"); 7382 7383 { 7384 bool threadmode, spsel; 7385 TCGMemOpIdx oi; 7386 ARMMMUIdx mmu_idx; 7387 uint32_t *frame_sp_p; 7388 uint32_t frameptr; 7389 7390 /* Pull the return address and IPSR from the Secure stack */ 7391 threadmode = !arm_v7m_is_handler_mode(env); 7392 spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK; 7393 7394 frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel); 7395 frameptr = *frame_sp_p; 7396 7397 /* These loads may throw an exception (for MPU faults). 
We want to 7398 * do them as secure, so work out what MMU index that is. 7399 */ 7400 mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true); 7401 oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx)); 7402 newpc = helper_le_ldul_mmu(env, frameptr, oi, 0); 7403 newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0); 7404 7405 /* Consistency checks on new IPSR */ 7406 newpsr_exc = newpsr & XPSR_EXCP; 7407 if (!((env->v7m.exception == 0 && newpsr_exc == 0) || 7408 (env->v7m.exception == 1 && newpsr_exc != 0))) { 7409 /* Pend the fault and tell our caller to take it */ 7410 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; 7411 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, 7412 env->v7m.secure); 7413 qemu_log_mask(CPU_LOG_INT, 7414 "...taking INVPC UsageFault: " 7415 "IPSR consistency check failed\n"); 7416 return false; 7417 } 7418 7419 *frame_sp_p = frameptr + 8; 7420 } 7421 7422 /* This invalidates frame_sp_p */ 7423 switch_v7m_security_state(env, true); 7424 env->v7m.exception = newpsr_exc; 7425 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK; 7426 if (newpsr & XPSR_SFPA) { 7427 env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK; 7428 } 7429 xpsr_write(env, 0, XPSR_IT); 7430 env->thumb = newpc & 1; 7431 env->regs[15] = newpc & ~1; 7432 7433 qemu_log_mask(CPU_LOG_INT, "...function return successful\n"); 7434 return true; 7435 } 7436 7437 static void arm_log_exception(int idx) 7438 { 7439 if (qemu_loglevel_mask(CPU_LOG_INT)) { 7440 const char *exc = NULL; 7441 static const char * const excnames[] = { 7442 [EXCP_UDEF] = "Undefined Instruction", 7443 [EXCP_SWI] = "SVC", 7444 [EXCP_PREFETCH_ABORT] = "Prefetch Abort", 7445 [EXCP_DATA_ABORT] = "Data Abort", 7446 [EXCP_IRQ] = "IRQ", 7447 [EXCP_FIQ] = "FIQ", 7448 [EXCP_BKPT] = "Breakpoint", 7449 [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit", 7450 [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage", 7451 [EXCP_HVC] = "Hypervisor Call", 7452 [EXCP_HYP_TRAP] = "Hypervisor Trap", 7453 [EXCP_SMC] = "Secure Monitor Call", 7454 [EXCP_VIRQ] = "Virtual IRQ", 7455 [EXCP_VFIQ] = "Virtual FIQ", 7456 [EXCP_SEMIHOST] = "Semihosting call", 7457 [EXCP_NOCP] = "v7M NOCP UsageFault", 7458 [EXCP_INVSTATE] = "v7M INVSTATE UsageFault", 7459 }; 7460 7461 if (idx >= 0 && idx < ARRAY_SIZE(excnames)) { 7462 exc = excnames[idx]; 7463 } 7464 if (!exc) { 7465 exc = "unknown"; 7466 } 7467 qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc); 7468 } 7469 } 7470 7471 static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, 7472 uint32_t addr, uint16_t *insn) 7473 { 7474 /* Load a 16-bit portion of a v7M instruction, returning true on success, 7475 * or false on failure (in which case we will have pended the appropriate 7476 * exception). 7477 * We need to do the instruction fetch's MPU and SAU checks 7478 * like this because there is no MMU index that would allow 7479 * doing the load with a single function call. Instead we must 7480 * first check that the security attributes permit the load 7481 * and that they don't mismatch on the two halves of the instruction, 7482 * and then we do the load as a secure load (ie using the security 7483 * attributes of the address, not the CPU, as architecturally required). 
7484 */ 7485 CPUState *cs = CPU(cpu); 7486 CPUARMState *env = &cpu->env; 7487 V8M_SAttributes sattrs = {}; 7488 MemTxAttrs attrs = {}; 7489 ARMMMUFaultInfo fi = {}; 7490 MemTxResult txres; 7491 target_ulong page_size; 7492 hwaddr physaddr; 7493 int prot; 7494 7495 v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs); 7496 if (!sattrs.nsc || sattrs.ns) { 7497 /* This must be the second half of the insn, and it straddles a 7498 * region boundary with the second half not being S&NSC. 7499 */ 7500 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; 7501 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 7502 qemu_log_mask(CPU_LOG_INT, 7503 "...really SecureFault with SFSR.INVEP\n"); 7504 return false; 7505 } 7506 if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, 7507 &physaddr, &attrs, &prot, &page_size, &fi, NULL)) { 7508 /* the MPU lookup failed */ 7509 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK; 7510 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure); 7511 qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n"); 7512 return false; 7513 } 7514 *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr, 7515 attrs, &txres); 7516 if (txres != MEMTX_OK) { 7517 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK; 7518 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false); 7519 qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n"); 7520 return false; 7521 } 7522 return true; 7523 } 7524 7525 static bool v7m_handle_execute_nsc(ARMCPU *cpu) 7526 { 7527 /* Check whether this attempt to execute code in a Secure & NS-Callable 7528 * memory region is for an SG instruction; if so, then emulate the 7529 * effect of the SG instruction and return true. Otherwise pend 7530 * the correct kind of exception and return false. 7531 */ 7532 CPUARMState *env = &cpu->env; 7533 ARMMMUIdx mmu_idx; 7534 uint16_t insn; 7535 7536 /* We should never get here unless get_phys_addr_pmsav8() caused 7537 * an exception for NS executing in S&NSC memory. 7538 */ 7539 assert(!env->v7m.secure); 7540 assert(arm_feature(env, ARM_FEATURE_M_SECURITY)); 7541 7542 /* We want to do the MPU lookup as secure; work out what mmu_idx that is */ 7543 mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true); 7544 7545 if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) { 7546 return false; 7547 } 7548 7549 if (!env->thumb) { 7550 goto gen_invep; 7551 } 7552 7553 if (insn != 0xe97f) { 7554 /* Not an SG instruction first half (we choose the IMPDEF 7555 * early-SG-check option). 7556 */ 7557 goto gen_invep; 7558 } 7559 7560 if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) { 7561 return false; 7562 } 7563 7564 if (insn != 0xe97f) { 7565 /* Not an SG instruction second half (yes, both halves of the SG 7566 * insn have the same hex value) 7567 */ 7568 goto gen_invep; 7569 } 7570 7571 /* OK, we have confirmed that we really have an SG instruction. 7572 * We know we're NS in S memory so don't need to repeat those checks. 
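 * The emulation below clears bit 0 of LR (so that the eventual function
 * return via BXNS goes back to Non-secure), switches to Secure state,
 * clears the IT bits and steps the PC past the 4-byte SG instruction.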
7573 */ 7574 qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32 7575 ", executing it\n", env->regs[15]); 7576 env->regs[14] &= ~1; 7577 switch_v7m_security_state(env, true); 7578 xpsr_write(env, 0, XPSR_IT); 7579 env->regs[15] += 4; 7580 return true; 7581 7582 gen_invep: 7583 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; 7584 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 7585 qemu_log_mask(CPU_LOG_INT, 7586 "...really SecureFault with SFSR.INVEP\n"); 7587 return false; 7588 } 7589 7590 void arm_v7m_cpu_do_interrupt(CPUState *cs) 7591 { 7592 ARMCPU *cpu = ARM_CPU(cs); 7593 CPUARMState *env = &cpu->env; 7594 uint32_t lr; 7595 bool ignore_stackfaults; 7596 7597 arm_log_exception(cs->exception_index); 7598 7599 /* For exceptions we just mark as pending on the NVIC, and let that 7600 handle it. */ 7601 switch (cs->exception_index) { 7602 case EXCP_UDEF: 7603 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); 7604 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK; 7605 break; 7606 case EXCP_NOCP: 7607 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); 7608 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK; 7609 break; 7610 case EXCP_INVSTATE: 7611 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); 7612 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK; 7613 break; 7614 case EXCP_SWI: 7615 /* The PC already points to the next instruction. */ 7616 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure); 7617 break; 7618 case EXCP_PREFETCH_ABORT: 7619 case EXCP_DATA_ABORT: 7620 /* Note that for M profile we don't have a guest facing FSR, but 7621 * the env->exception.fsr will be populated by the code that 7622 * raises the fault, in the A profile short-descriptor format. 7623 */ 7624 switch (env->exception.fsr & 0xf) { 7625 case M_FAKE_FSR_NSC_EXEC: 7626 /* Exception generated when we try to execute code at an address 7627 * which is marked as Secure & Non-Secure Callable and the CPU 7628 * is in the Non-Secure state. The only instruction which can 7629 * be executed like this is SG (and that only if both halves of 7630 * the SG instruction have the same security attributes.) 7631 * Everything else must generate an INVEP SecureFault, so we 7632 * emulate the SG instruction here. 7633 */ 7634 if (v7m_handle_execute_nsc(cpu)) { 7635 return; 7636 } 7637 break; 7638 case M_FAKE_FSR_SFAULT: 7639 /* Various flavours of SecureFault for attempts to execute or 7640 * access data in the wrong security state. 
7641 */ 7642 switch (cs->exception_index) { 7643 case EXCP_PREFETCH_ABORT: 7644 if (env->v7m.secure) { 7645 env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK; 7646 qemu_log_mask(CPU_LOG_INT, 7647 "...really SecureFault with SFSR.INVTRAN\n"); 7648 } else { 7649 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; 7650 qemu_log_mask(CPU_LOG_INT, 7651 "...really SecureFault with SFSR.INVEP\n"); 7652 } 7653 break; 7654 case EXCP_DATA_ABORT: 7655 /* This must be an NS access to S memory */ 7656 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK; 7657 qemu_log_mask(CPU_LOG_INT, 7658 "...really SecureFault with SFSR.AUVIOL\n"); 7659 break; 7660 } 7661 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 7662 break; 7663 case 0x8: /* External Abort */ 7664 switch (cs->exception_index) { 7665 case EXCP_PREFETCH_ABORT: 7666 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK; 7667 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n"); 7668 break; 7669 case EXCP_DATA_ABORT: 7670 env->v7m.cfsr[M_REG_NS] |= 7671 (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK); 7672 env->v7m.bfar = env->exception.vaddress; 7673 qemu_log_mask(CPU_LOG_INT, 7674 "...with CFSR.PRECISERR and BFAR 0x%x\n", 7675 env->v7m.bfar); 7676 break; 7677 } 7678 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false); 7679 break; 7680 default: 7681 /* All other FSR values are either MPU faults or "can't happen 7682 * for M profile" cases. 7683 */ 7684 switch (cs->exception_index) { 7685 case EXCP_PREFETCH_ABORT: 7686 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK; 7687 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n"); 7688 break; 7689 case EXCP_DATA_ABORT: 7690 env->v7m.cfsr[env->v7m.secure] |= 7691 (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK); 7692 env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress; 7693 qemu_log_mask(CPU_LOG_INT, 7694 "...with CFSR.DACCVIOL and MMFAR 0x%x\n", 7695 env->v7m.mmfar[env->v7m.secure]); 7696 break; 7697 } 7698 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, 7699 env->v7m.secure); 7700 break; 7701 } 7702 break; 7703 case EXCP_BKPT: 7704 if (semihosting_enabled()) { 7705 int nr; 7706 nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff; 7707 if (nr == 0xab) { 7708 env->regs[15] += 2; 7709 qemu_log_mask(CPU_LOG_INT, 7710 "...handling as semihosting call 0x%x\n", 7711 env->regs[0]); 7712 env->regs[0] = do_arm_semihosting(env); 7713 return; 7714 } 7715 } 7716 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false); 7717 break; 7718 case EXCP_IRQ: 7719 break; 7720 case EXCP_EXCEPTION_EXIT: 7721 if (env->regs[15] < EXC_RETURN_MIN_MAGIC) { 7722 /* Must be v8M security extension function return */ 7723 assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC); 7724 assert(arm_feature(env, ARM_FEATURE_M_SECURITY)); 7725 if (do_v7m_function_return(cpu)) { 7726 return; 7727 } 7728 } else { 7729 do_v7m_exception_exit(cpu); 7730 return; 7731 } 7732 break; 7733 default: 7734 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 7735 return; /* Never happens. Keep compiler happy. */ 7736 } 7737 7738 if (arm_feature(env, ARM_FEATURE_V8)) { 7739 lr = R_V7M_EXCRET_RES1_MASK | 7740 R_V7M_EXCRET_DCRS_MASK | 7741 R_V7M_EXCRET_FTYPE_MASK; 7742 /* The S bit indicates whether we should return to Secure 7743 * or NonSecure (ie our current state). 7744 * The ES bit indicates whether we're taking this exception 7745 * to Secure or NonSecure (ie our target state). We set it 7746 * later, in v7m_exception_taken(). 7747 * The SPSEL bit is also set in v7m_exception_taken() for v8M. 
7748 * This corresponds to the ARM ARM pseudocode for v8M setting 7749 * some LR bits in PushStack() and some in ExceptionTaken(); 7750 * the distinction matters for the tailchain cases where we 7751 * can take an exception without pushing the stack. 7752 */ 7753 if (env->v7m.secure) { 7754 lr |= R_V7M_EXCRET_S_MASK; 7755 } 7756 } else { 7757 lr = R_V7M_EXCRET_RES1_MASK | 7758 R_V7M_EXCRET_S_MASK | 7759 R_V7M_EXCRET_DCRS_MASK | 7760 R_V7M_EXCRET_FTYPE_MASK | 7761 R_V7M_EXCRET_ES_MASK; 7762 if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) { 7763 lr |= R_V7M_EXCRET_SPSEL_MASK; 7764 } 7765 } 7766 if (!arm_v7m_is_handler_mode(env)) { 7767 lr |= R_V7M_EXCRET_MODE_MASK; 7768 } 7769 7770 ignore_stackfaults = v7m_push_stack(cpu); 7771 v7m_exception_taken(cpu, lr, false, ignore_stackfaults); 7772 } 7773 7774 /* Function used to synchronize QEMU's AArch64 register set with AArch32 7775 * register set. This is necessary when switching between AArch32 and AArch64 7776 * execution state. 7777 */ 7778 void aarch64_sync_32_to_64(CPUARMState *env) 7779 { 7780 int i; 7781 uint32_t mode = env->uncached_cpsr & CPSR_M; 7782 7783 /* We can blanket copy R[0:7] to X[0:7] */ 7784 for (i = 0; i < 8; i++) { 7785 env->xregs[i] = env->regs[i]; 7786 } 7787 7788 /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12. 7789 * Otherwise, they come from the banked user regs. 7790 */ 7791 if (mode == ARM_CPU_MODE_FIQ) { 7792 for (i = 8; i < 13; i++) { 7793 env->xregs[i] = env->usr_regs[i - 8]; 7794 } 7795 } else { 7796 for (i = 8; i < 13; i++) { 7797 env->xregs[i] = env->regs[i]; 7798 } 7799 } 7800 7801 /* Registers x13-x23 are the various mode SP and FP registers. Registers 7802 * r13 and r14 are only copied if we are in that mode, otherwise we copy 7803 * from the mode banked register. 
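 * For reference, the mapping used here (and inverted in
 * aarch64_sync_64_to_32() below) is:
 *   x13, x14: USR/SYS r13, r14    x15: HYP r13
 *   x16, x17: IRQ r14, r13        x18, x19: SVC r14, r13
 *   x20, x21: ABT r14, r13        x22, x23: UND r14, r13
 * with x24-x30 covering the FIQ bank as described below.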
7804 */ 7805 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 7806 env->xregs[13] = env->regs[13]; 7807 env->xregs[14] = env->regs[14]; 7808 } else { 7809 env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)]; 7810 /* HYP is an exception in that it is copied from r14 */ 7811 if (mode == ARM_CPU_MODE_HYP) { 7812 env->xregs[14] = env->regs[14]; 7813 } else { 7814 env->xregs[14] = env->banked_r14[bank_number(ARM_CPU_MODE_USR)]; 7815 } 7816 } 7817 7818 if (mode == ARM_CPU_MODE_HYP) { 7819 env->xregs[15] = env->regs[13]; 7820 } else { 7821 env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)]; 7822 } 7823 7824 if (mode == ARM_CPU_MODE_IRQ) { 7825 env->xregs[16] = env->regs[14]; 7826 env->xregs[17] = env->regs[13]; 7827 } else { 7828 env->xregs[16] = env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)]; 7829 env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)]; 7830 } 7831 7832 if (mode == ARM_CPU_MODE_SVC) { 7833 env->xregs[18] = env->regs[14]; 7834 env->xregs[19] = env->regs[13]; 7835 } else { 7836 env->xregs[18] = env->banked_r14[bank_number(ARM_CPU_MODE_SVC)]; 7837 env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)]; 7838 } 7839 7840 if (mode == ARM_CPU_MODE_ABT) { 7841 env->xregs[20] = env->regs[14]; 7842 env->xregs[21] = env->regs[13]; 7843 } else { 7844 env->xregs[20] = env->banked_r14[bank_number(ARM_CPU_MODE_ABT)]; 7845 env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)]; 7846 } 7847 7848 if (mode == ARM_CPU_MODE_UND) { 7849 env->xregs[22] = env->regs[14]; 7850 env->xregs[23] = env->regs[13]; 7851 } else { 7852 env->xregs[22] = env->banked_r14[bank_number(ARM_CPU_MODE_UND)]; 7853 env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)]; 7854 } 7855 7856 /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 7857 * mode, then we can copy from r8-r14. Otherwise, we copy from the 7858 * FIQ bank for r8-r14. 7859 */ 7860 if (mode == ARM_CPU_MODE_FIQ) { 7861 for (i = 24; i < 31; i++) { 7862 env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */ 7863 } 7864 } else { 7865 for (i = 24; i < 29; i++) { 7866 env->xregs[i] = env->fiq_regs[i - 24]; 7867 } 7868 env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)]; 7869 env->xregs[30] = env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)]; 7870 } 7871 7872 env->pc = env->regs[15]; 7873 } 7874 7875 /* Function used to synchronize QEMU's AArch32 register set with AArch64 7876 * register set. This is necessary when switching between AArch32 and AArch64 7877 * execution state. 7878 */ 7879 void aarch64_sync_64_to_32(CPUARMState *env) 7880 { 7881 int i; 7882 uint32_t mode = env->uncached_cpsr & CPSR_M; 7883 7884 /* We can blanket copy X[0:7] to R[0:7] */ 7885 for (i = 0; i < 8; i++) { 7886 env->regs[i] = env->xregs[i]; 7887 } 7888 7889 /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12. 7890 * Otherwise, we copy x8-x12 into the banked user regs. 7891 */ 7892 if (mode == ARM_CPU_MODE_FIQ) { 7893 for (i = 8; i < 13; i++) { 7894 env->usr_regs[i - 8] = env->xregs[i]; 7895 } 7896 } else { 7897 for (i = 8; i < 13; i++) { 7898 env->regs[i] = env->xregs[i]; 7899 } 7900 } 7901 7902 /* Registers r13 & r14 depend on the current mode. 7903 * If we are in a given mode, we copy the corresponding x registers to r13 7904 * and r14. Otherwise, we copy the x register to the banked r13 and r14 7905 * for the mode. 
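 *
 * For example, when the CPU is in HYP mode the live r13 is restored from
 * x15 (SP_hyp) while r14 comes from x14, because HYP has no banked r14 of
 * its own and shares the USR one; the special cases below handle this.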
7906 */ 7907 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 7908 env->regs[13] = env->xregs[13]; 7909 env->regs[14] = env->xregs[14]; 7910 } else { 7911 env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13]; 7912 7913 /* HYP is an exception in that it does not have its own banked r14 but 7914 * shares the USR r14 7915 */ 7916 if (mode == ARM_CPU_MODE_HYP) { 7917 env->regs[14] = env->xregs[14]; 7918 } else { 7919 env->banked_r14[bank_number(ARM_CPU_MODE_USR)] = env->xregs[14]; 7920 } 7921 } 7922 7923 if (mode == ARM_CPU_MODE_HYP) { 7924 env->regs[13] = env->xregs[15]; 7925 } else { 7926 env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15]; 7927 } 7928 7929 if (mode == ARM_CPU_MODE_IRQ) { 7930 env->regs[14] = env->xregs[16]; 7931 env->regs[13] = env->xregs[17]; 7932 } else { 7933 env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16]; 7934 env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17]; 7935 } 7936 7937 if (mode == ARM_CPU_MODE_SVC) { 7938 env->regs[14] = env->xregs[18]; 7939 env->regs[13] = env->xregs[19]; 7940 } else { 7941 env->banked_r14[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18]; 7942 env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19]; 7943 } 7944 7945 if (mode == ARM_CPU_MODE_ABT) { 7946 env->regs[14] = env->xregs[20]; 7947 env->regs[13] = env->xregs[21]; 7948 } else { 7949 env->banked_r14[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20]; 7950 env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21]; 7951 } 7952 7953 if (mode == ARM_CPU_MODE_UND) { 7954 env->regs[14] = env->xregs[22]; 7955 env->regs[13] = env->xregs[23]; 7956 } else { 7957 env->banked_r14[bank_number(ARM_CPU_MODE_UND)] = env->xregs[22]; 7958 env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23]; 7959 } 7960 7961 /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 7962 * mode, then we can copy to r8-r14. Otherwise, we copy to the 7963 * FIQ bank for r8-r14. 7964 */ 7965 if (mode == ARM_CPU_MODE_FIQ) { 7966 for (i = 24; i < 31; i++) { 7967 env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */ 7968 } 7969 } else { 7970 for (i = 24; i < 29; i++) { 7971 env->fiq_regs[i - 24] = env->xregs[i]; 7972 } 7973 env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29]; 7974 env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30]; 7975 } 7976 7977 env->regs[15] = env->pc; 7978 } 7979 7980 static void arm_cpu_do_interrupt_aarch32(CPUState *cs) 7981 { 7982 ARMCPU *cpu = ARM_CPU(cs); 7983 CPUARMState *env = &cpu->env; 7984 uint32_t addr; 7985 uint32_t mask; 7986 int new_mode; 7987 uint32_t offset; 7988 uint32_t moe; 7989 7990 /* If this is a debug exception we must update the DBGDSCR.MOE bits */ 7991 switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) { 7992 case EC_BREAKPOINT: 7993 case EC_BREAKPOINT_SAME_EL: 7994 moe = 1; 7995 break; 7996 case EC_WATCHPOINT: 7997 case EC_WATCHPOINT_SAME_EL: 7998 moe = 10; 7999 break; 8000 case EC_AA32_BKPT: 8001 moe = 3; 8002 break; 8003 case EC_VECTORCATCH: 8004 moe = 5; 8005 break; 8006 default: 8007 moe = 0; 8008 break; 8009 } 8010 8011 if (moe) { 8012 env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe); 8013 } 8014 8015 /* TODO: Vectored interrupt controller. 
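 *
 * For reference, the switch below dispatches on the classic A/R profile
 * vector offsets: 0x04 undefined, 0x08 SVC (and SMC, relative to MVBAR),
 * 0x0c prefetch abort, 0x10 data abort, 0x18 IRQ, 0x1c FIQ. The base is
 * MVBAR for Monitor mode, 0xffff0000 when SCTLR.V (high vectors) is set,
 * and the banked VBAR otherwise.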
*/ 8016 switch (cs->exception_index) { 8017 case EXCP_UDEF: 8018 new_mode = ARM_CPU_MODE_UND; 8019 addr = 0x04; 8020 mask = CPSR_I; 8021 if (env->thumb) 8022 offset = 2; 8023 else 8024 offset = 4; 8025 break; 8026 case EXCP_SWI: 8027 new_mode = ARM_CPU_MODE_SVC; 8028 addr = 0x08; 8029 mask = CPSR_I; 8030 /* The PC already points to the next instruction. */ 8031 offset = 0; 8032 break; 8033 case EXCP_BKPT: 8034 /* Fall through to prefetch abort. */ 8035 case EXCP_PREFETCH_ABORT: 8036 A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr); 8037 A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress); 8038 qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n", 8039 env->exception.fsr, (uint32_t)env->exception.vaddress); 8040 new_mode = ARM_CPU_MODE_ABT; 8041 addr = 0x0c; 8042 mask = CPSR_A | CPSR_I; 8043 offset = 4; 8044 break; 8045 case EXCP_DATA_ABORT: 8046 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr); 8047 A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress); 8048 qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n", 8049 env->exception.fsr, 8050 (uint32_t)env->exception.vaddress); 8051 new_mode = ARM_CPU_MODE_ABT; 8052 addr = 0x10; 8053 mask = CPSR_A | CPSR_I; 8054 offset = 8; 8055 break; 8056 case EXCP_IRQ: 8057 new_mode = ARM_CPU_MODE_IRQ; 8058 addr = 0x18; 8059 /* Disable IRQ and imprecise data aborts. */ 8060 mask = CPSR_A | CPSR_I; 8061 offset = 4; 8062 if (env->cp15.scr_el3 & SCR_IRQ) { 8063 /* IRQ routed to monitor mode */ 8064 new_mode = ARM_CPU_MODE_MON; 8065 mask |= CPSR_F; 8066 } 8067 break; 8068 case EXCP_FIQ: 8069 new_mode = ARM_CPU_MODE_FIQ; 8070 addr = 0x1c; 8071 /* Disable FIQ, IRQ and imprecise data aborts. */ 8072 mask = CPSR_A | CPSR_I | CPSR_F; 8073 if (env->cp15.scr_el3 & SCR_FIQ) { 8074 /* FIQ routed to monitor mode */ 8075 new_mode = ARM_CPU_MODE_MON; 8076 } 8077 offset = 4; 8078 break; 8079 case EXCP_VIRQ: 8080 new_mode = ARM_CPU_MODE_IRQ; 8081 addr = 0x18; 8082 /* Disable IRQ and imprecise data aborts. */ 8083 mask = CPSR_A | CPSR_I; 8084 offset = 4; 8085 break; 8086 case EXCP_VFIQ: 8087 new_mode = ARM_CPU_MODE_FIQ; 8088 addr = 0x1c; 8089 /* Disable FIQ, IRQ and imprecise data aborts. */ 8090 mask = CPSR_A | CPSR_I | CPSR_F; 8091 offset = 4; 8092 break; 8093 case EXCP_SMC: 8094 new_mode = ARM_CPU_MODE_MON; 8095 addr = 0x08; 8096 mask = CPSR_A | CPSR_I | CPSR_F; 8097 offset = 0; 8098 break; 8099 default: 8100 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 8101 return; /* Never happens. Keep compiler happy. */ 8102 } 8103 8104 if (new_mode == ARM_CPU_MODE_MON) { 8105 addr += env->cp15.mvbar; 8106 } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) { 8107 /* High vectors. When enabled, base address cannot be remapped. */ 8108 addr += 0xffff0000; 8109 } else { 8110 /* ARM v7 architectures provide a vector base address register to remap 8111 * the interrupt vector table. 8112 * This register is only followed in non-monitor mode, and is banked. 8113 * Note: only bits 31:5 are valid. 8114 */ 8115 addr += A32_BANKED_CURRENT_REG_GET(env, vbar); 8116 } 8117 8118 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { 8119 env->cp15.scr_el3 &= ~SCR_NS; 8120 } 8121 8122 switch_mode (env, new_mode); 8123 /* For exceptions taken to AArch32 we must clear the SS bit in both 8124 * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now. 8125 */ 8126 env->uncached_cpsr &= ~PSTATE_SS; 8127 env->spsr = cpsr_read(env); 8128 /* Clear IT bits. 
*/
8129 env->condexec_bits = 0;
8130 /* Switch to the new mode, and to the correct instruction set. */
8131 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
8132 /* Set new mode endianness */
8133 env->uncached_cpsr &= ~CPSR_E;
8134 if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
8135 env->uncached_cpsr |= CPSR_E;
8136 }
8137 env->daif |= mask;
8138 /* This is a lie, as there was no c1_sys on V4T/V5, but who cares;
8139 * we should really just guard the Thumb mode check on V4. */
8140 if (arm_feature(env, ARM_FEATURE_V4T)) {
8141 env->thumb = (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
8142 }
8143 env->regs[14] = env->regs[15] + offset;
8144 env->regs[15] = addr;
8145 }
8146
8147 /* Handle exception entry to a target EL which is using AArch64 */
8148 static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
8149 {
8150 ARMCPU *cpu = ARM_CPU(cs);
8151 CPUARMState *env = &cpu->env;
8152 unsigned int new_el = env->exception.target_el;
8153 target_ulong addr = env->cp15.vbar_el[new_el];
8154 unsigned int new_mode = aarch64_pstate_mode(new_el, true);
8155
8156 if (arm_current_el(env) < new_el) {
8157 /* Entry vector offset depends on whether the implemented EL
8158 * immediately lower than the target level is using AArch32 or AArch64
8159 */
8160 bool is_aa64;
8161
8162 switch (new_el) {
8163 case 3:
8164 is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
8165 break;
8166 case 2:
8167 is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
8168 break;
8169 case 1:
8170 is_aa64 = is_a64(env);
8171 break;
8172 default:
8173 g_assert_not_reached();
8174 }
8175
8176 if (is_aa64) {
8177 addr += 0x400;
8178 } else {
8179 addr += 0x600;
8180 }
8181 } else if (pstate_read(env) & PSTATE_SP) {
8182 addr += 0x200;
8183 }
8184
8185 switch (cs->exception_index) {
8186 case EXCP_PREFETCH_ABORT:
8187 case EXCP_DATA_ABORT:
8188 env->cp15.far_el[new_el] = env->exception.vaddress;
8189 qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
8190 env->cp15.far_el[new_el]);
8191 /* fall through */
8192 case EXCP_BKPT:
8193 case EXCP_UDEF:
8194 case EXCP_SWI:
8195 case EXCP_HVC:
8196 case EXCP_HYP_TRAP:
8197 case EXCP_SMC:
8198 env->cp15.esr_el[new_el] = env->exception.syndrome;
8199 break;
8200 case EXCP_IRQ:
8201 case EXCP_VIRQ:
8202 addr += 0x80;
8203 break;
8204 case EXCP_FIQ:
8205 case EXCP_VFIQ:
8206 addr += 0x100;
8207 break;
8208 case EXCP_SEMIHOST:
8209 qemu_log_mask(CPU_LOG_INT,
8210 "...handling as semihosting call 0x%" PRIx64 "\n",
8211 env->xregs[0]);
8212 env->xregs[0] = do_arm_semihosting(env);
8213 return;
8214 default:
8215 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
8216 }
8217
8218 if (is_a64(env)) {
8219 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
8220 aarch64_save_sp(env, arm_current_el(env));
8221 env->elr_el[new_el] = env->pc;
8222 } else {
8223 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
8224 env->elr_el[new_el] = env->regs[15];
8225
8226 aarch64_sync_32_to_64(env);
8227
8228 env->condexec_bits = 0;
8229 }
8230 qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
8231 env->elr_el[new_el]);
8232
8233 pstate_write(env, PSTATE_DAIF | new_mode);
8234 env->aarch64 = 1;
8235 aarch64_restore_sp(env, new_el);
8236
8237 env->pc = addr;
8238
8239 qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
8240 new_el, env->pc, pstate_read(env));
8241 }
8242
8243 static inline bool check_for_semihosting(CPUState *cs)
8244 {
8245 /* Check whether this exception is a semihosting call; if so
8246 * then
handle it and return true; otherwise return false. 8247 */ 8248 ARMCPU *cpu = ARM_CPU(cs); 8249 CPUARMState *env = &cpu->env; 8250 8251 if (is_a64(env)) { 8252 if (cs->exception_index == EXCP_SEMIHOST) { 8253 /* This is always the 64-bit semihosting exception. 8254 * The "is this usermode" and "is semihosting enabled" 8255 * checks have been done at translate time. 8256 */ 8257 qemu_log_mask(CPU_LOG_INT, 8258 "...handling as semihosting call 0x%" PRIx64 "\n", 8259 env->xregs[0]); 8260 env->xregs[0] = do_arm_semihosting(env); 8261 return true; 8262 } 8263 return false; 8264 } else { 8265 uint32_t imm; 8266 8267 /* Only intercept calls from privileged modes, to provide some 8268 * semblance of security. 8269 */ 8270 if (cs->exception_index != EXCP_SEMIHOST && 8271 (!semihosting_enabled() || 8272 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) { 8273 return false; 8274 } 8275 8276 switch (cs->exception_index) { 8277 case EXCP_SEMIHOST: 8278 /* This is always a semihosting call; the "is this usermode" 8279 * and "is semihosting enabled" checks have been done at 8280 * translate time. 8281 */ 8282 break; 8283 case EXCP_SWI: 8284 /* Check for semihosting interrupt. */ 8285 if (env->thumb) { 8286 imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env)) 8287 & 0xff; 8288 if (imm == 0xab) { 8289 break; 8290 } 8291 } else { 8292 imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env)) 8293 & 0xffffff; 8294 if (imm == 0x123456) { 8295 break; 8296 } 8297 } 8298 return false; 8299 case EXCP_BKPT: 8300 /* See if this is a semihosting syscall. */ 8301 if (env->thumb) { 8302 imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) 8303 & 0xff; 8304 if (imm == 0xab) { 8305 env->regs[15] += 2; 8306 break; 8307 } 8308 } 8309 return false; 8310 default: 8311 return false; 8312 } 8313 8314 qemu_log_mask(CPU_LOG_INT, 8315 "...handling as semihosting call 0x%x\n", 8316 env->regs[0]); 8317 env->regs[0] = do_arm_semihosting(env); 8318 return true; 8319 } 8320 } 8321 8322 /* Handle a CPU exception for A and R profile CPUs. 8323 * Do any appropriate logging, handle PSCI calls, and then hand off 8324 * to the AArch64-entry or AArch32-entry function depending on the 8325 * target exception level's register width. 8326 */ 8327 void arm_cpu_do_interrupt(CPUState *cs) 8328 { 8329 ARMCPU *cpu = ARM_CPU(cs); 8330 CPUARMState *env = &cpu->env; 8331 unsigned int new_el = env->exception.target_el; 8332 8333 assert(!arm_feature(env, ARM_FEATURE_M)); 8334 8335 arm_log_exception(cs->exception_index); 8336 qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env), 8337 new_el); 8338 if (qemu_loglevel_mask(CPU_LOG_INT) 8339 && !excp_is_internal(cs->exception_index)) { 8340 qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n", 8341 env->exception.syndrome >> ARM_EL_EC_SHIFT, 8342 env->exception.syndrome); 8343 } 8344 8345 if (arm_is_psci_call(cpu, cs->exception_index)) { 8346 arm_handle_psci_call(cpu); 8347 qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n"); 8348 return; 8349 } 8350 8351 /* Semihosting semantics depend on the register width of the 8352 * code that caused the exception, not the target exception level, 8353 * so must be handled here. 8354 */ 8355 if (check_for_semihosting(cs)) { 8356 return; 8357 } 8358 8359 /* Hooks may change global state so BQL should be held, also the 8360 * BQL needs to be held for any modification of 8361 * cs->interrupt_request. 
8362 */ 8363 g_assert(qemu_mutex_iothread_locked()); 8364 8365 arm_call_pre_el_change_hook(cpu); 8366 8367 assert(!excp_is_internal(cs->exception_index)); 8368 if (arm_el_is_aa64(env, new_el)) { 8369 arm_cpu_do_interrupt_aarch64(cs); 8370 } else { 8371 arm_cpu_do_interrupt_aarch32(cs); 8372 } 8373 8374 arm_call_el_change_hook(cpu); 8375 8376 if (!kvm_enabled()) { 8377 cs->interrupt_request |= CPU_INTERRUPT_EXITTB; 8378 } 8379 } 8380 8381 /* Return the exception level which controls this address translation regime */ 8382 static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx) 8383 { 8384 switch (mmu_idx) { 8385 case ARMMMUIdx_S2NS: 8386 case ARMMMUIdx_S1E2: 8387 return 2; 8388 case ARMMMUIdx_S1E3: 8389 return 3; 8390 case ARMMMUIdx_S1SE0: 8391 return arm_el_is_aa64(env, 3) ? 1 : 3; 8392 case ARMMMUIdx_S1SE1: 8393 case ARMMMUIdx_S1NSE0: 8394 case ARMMMUIdx_S1NSE1: 8395 case ARMMMUIdx_MPrivNegPri: 8396 case ARMMMUIdx_MUserNegPri: 8397 case ARMMMUIdx_MPriv: 8398 case ARMMMUIdx_MUser: 8399 case ARMMMUIdx_MSPrivNegPri: 8400 case ARMMMUIdx_MSUserNegPri: 8401 case ARMMMUIdx_MSPriv: 8402 case ARMMMUIdx_MSUser: 8403 return 1; 8404 default: 8405 g_assert_not_reached(); 8406 } 8407 } 8408 8409 /* Return the SCTLR value which controls this address translation regime */ 8410 static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx) 8411 { 8412 return env->cp15.sctlr_el[regime_el(env, mmu_idx)]; 8413 } 8414 8415 /* Return true if the specified stage of address translation is disabled */ 8416 static inline bool regime_translation_disabled(CPUARMState *env, 8417 ARMMMUIdx mmu_idx) 8418 { 8419 if (arm_feature(env, ARM_FEATURE_M)) { 8420 switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] & 8421 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) { 8422 case R_V7M_MPU_CTRL_ENABLE_MASK: 8423 /* Enabled, but not for HardFault and NMI */ 8424 return mmu_idx & ARM_MMU_IDX_M_NEGPRI; 8425 case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK: 8426 /* Enabled for all cases */ 8427 return false; 8428 case 0: 8429 default: 8430 /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but 8431 * we warned about that in armv7m_nvic.c when the guest set it. 
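 *
 * To summarize the decode above: ENABLE == 0 means the MPU is treated as
 * off (translation disabled); ENABLE == 1 with HFNMIENA == 0 disables it
 * only for the negative-priority (HardFault/NMI) MMU indexes; setting
 * both bits keeps the MPU enabled in all cases.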
8432 */ 8433 return true; 8434 } 8435 } 8436 8437 if (mmu_idx == ARMMMUIdx_S2NS) { 8438 return (env->cp15.hcr_el2 & HCR_VM) == 0; 8439 } 8440 8441 if (env->cp15.hcr_el2 & HCR_TGE) { 8442 /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */ 8443 if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) { 8444 return true; 8445 } 8446 } 8447 8448 return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0; 8449 } 8450 8451 static inline bool regime_translation_big_endian(CPUARMState *env, 8452 ARMMMUIdx mmu_idx) 8453 { 8454 return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0; 8455 } 8456 8457 /* Return the TCR controlling this translation regime */ 8458 static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx) 8459 { 8460 if (mmu_idx == ARMMMUIdx_S2NS) { 8461 return &env->cp15.vtcr_el2; 8462 } 8463 return &env->cp15.tcr_el[regime_el(env, mmu_idx)]; 8464 } 8465 8466 /* Convert a possible stage1+2 MMU index into the appropriate 8467 * stage 1 MMU index 8468 */ 8469 static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx) 8470 { 8471 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) { 8472 mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0); 8473 } 8474 return mmu_idx; 8475 } 8476 8477 /* Returns TBI0 value for current regime el */ 8478 uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx) 8479 { 8480 TCR *tcr; 8481 uint32_t el; 8482 8483 /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert 8484 * a stage 1+2 mmu index into the appropriate stage 1 mmu index. 8485 */ 8486 mmu_idx = stage_1_mmu_idx(mmu_idx); 8487 8488 tcr = regime_tcr(env, mmu_idx); 8489 el = regime_el(env, mmu_idx); 8490 8491 if (el > 1) { 8492 return extract64(tcr->raw_tcr, 20, 1); 8493 } else { 8494 return extract64(tcr->raw_tcr, 37, 1); 8495 } 8496 } 8497 8498 /* Returns TBI1 value for current regime el */ 8499 uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx) 8500 { 8501 TCR *tcr; 8502 uint32_t el; 8503 8504 /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert 8505 * a stage 1+2 mmu index into the appropriate stage 1 mmu index. 8506 */ 8507 mmu_idx = stage_1_mmu_idx(mmu_idx); 8508 8509 tcr = regime_tcr(env, mmu_idx); 8510 el = regime_el(env, mmu_idx); 8511 8512 if (el > 1) { 8513 return 0; 8514 } else { 8515 return extract64(tcr->raw_tcr, 38, 1); 8516 } 8517 } 8518 8519 /* Return the TTBR associated with this translation regime */ 8520 static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, 8521 int ttbrn) 8522 { 8523 if (mmu_idx == ARMMMUIdx_S2NS) { 8524 return env->cp15.vttbr_el2; 8525 } 8526 if (ttbrn == 0) { 8527 return env->cp15.ttbr0_el[regime_el(env, mmu_idx)]; 8528 } else { 8529 return env->cp15.ttbr1_el[regime_el(env, mmu_idx)]; 8530 } 8531 } 8532 8533 /* Return true if the translation regime is using LPAE format page tables */ 8534 static inline bool regime_using_lpae_format(CPUARMState *env, 8535 ARMMMUIdx mmu_idx) 8536 { 8537 int el = regime_el(env, mmu_idx); 8538 if (el == 2 || arm_el_is_aa64(env, el)) { 8539 return true; 8540 } 8541 if (arm_feature(env, ARM_FEATURE_LPAE) 8542 && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) { 8543 return true; 8544 } 8545 return false; 8546 } 8547 8548 /* Returns true if the stage 1 translation regime is using LPAE format page 8549 * tables. Used when raising alignment exceptions, whose FSR changes depending 8550 * on whether the long or short descriptor format is in use. 
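 *
 * In practice: EL2 and any AArch64-controlled regime always count as
 * LPAE; an AArch32 EL1 or EL3 regime counts as LPAE only when the CPU
 * has ARM_FEATURE_LPAE and TTBCR.EAE is set.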
*/ 8551 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx) 8552 { 8553 mmu_idx = stage_1_mmu_idx(mmu_idx); 8554 8555 return regime_using_lpae_format(env, mmu_idx); 8556 } 8557 8558 static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) 8559 { 8560 switch (mmu_idx) { 8561 case ARMMMUIdx_S1SE0: 8562 case ARMMMUIdx_S1NSE0: 8563 case ARMMMUIdx_MUser: 8564 case ARMMMUIdx_MSUser: 8565 case ARMMMUIdx_MUserNegPri: 8566 case ARMMMUIdx_MSUserNegPri: 8567 return true; 8568 default: 8569 return false; 8570 case ARMMMUIdx_S12NSE0: 8571 case ARMMMUIdx_S12NSE1: 8572 g_assert_not_reached(); 8573 } 8574 } 8575 8576 /* Translate section/page access permissions to page 8577 * R/W protection flags 8578 * 8579 * @env: CPUARMState 8580 * @mmu_idx: MMU index indicating required translation regime 8581 * @ap: The 3-bit access permissions (AP[2:0]) 8582 * @domain_prot: The 2-bit domain access permissions 8583 */ 8584 static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, 8585 int ap, int domain_prot) 8586 { 8587 bool is_user = regime_is_user(env, mmu_idx); 8588 8589 if (domain_prot == 3) { 8590 return PAGE_READ | PAGE_WRITE; 8591 } 8592 8593 switch (ap) { 8594 case 0: 8595 if (arm_feature(env, ARM_FEATURE_V7)) { 8596 return 0; 8597 } 8598 switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) { 8599 case SCTLR_S: 8600 return is_user ? 0 : PAGE_READ; 8601 case SCTLR_R: 8602 return PAGE_READ; 8603 default: 8604 return 0; 8605 } 8606 case 1: 8607 return is_user ? 0 : PAGE_READ | PAGE_WRITE; 8608 case 2: 8609 if (is_user) { 8610 return PAGE_READ; 8611 } else { 8612 return PAGE_READ | PAGE_WRITE; 8613 } 8614 case 3: 8615 return PAGE_READ | PAGE_WRITE; 8616 case 4: /* Reserved. */ 8617 return 0; 8618 case 5: 8619 return is_user ? 0 : PAGE_READ; 8620 case 6: 8621 return PAGE_READ; 8622 case 7: 8623 if (!arm_feature(env, ARM_FEATURE_V6K)) { 8624 return 0; 8625 } 8626 return PAGE_READ; 8627 default: 8628 g_assert_not_reached(); 8629 } 8630 } 8631 8632 /* Translate section/page access permissions to page 8633 * R/W protection flags. 8634 * 8635 * @ap: The 2-bit simple AP (AP[2:1]) 8636 * @is_user: TRUE if accessing from PL0 8637 */ 8638 static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user) 8639 { 8640 switch (ap) { 8641 case 0: 8642 return is_user ? 0 : PAGE_READ | PAGE_WRITE; 8643 case 1: 8644 return PAGE_READ | PAGE_WRITE; 8645 case 2: 8646 return is_user ? 
0 : PAGE_READ; 8647 case 3: 8648 return PAGE_READ; 8649 default: 8650 g_assert_not_reached(); 8651 } 8652 } 8653 8654 static inline int 8655 simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap) 8656 { 8657 return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx)); 8658 } 8659 8660 /* Translate S2 section/page access permissions to protection flags 8661 * 8662 * @env: CPUARMState 8663 * @s2ap: The 2-bit stage2 access permissions (S2AP) 8664 * @xn: XN (execute-never) bit 8665 */ 8666 static int get_S2prot(CPUARMState *env, int s2ap, int xn) 8667 { 8668 int prot = 0; 8669 8670 if (s2ap & 1) { 8671 prot |= PAGE_READ; 8672 } 8673 if (s2ap & 2) { 8674 prot |= PAGE_WRITE; 8675 } 8676 if (!xn) { 8677 if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) { 8678 prot |= PAGE_EXEC; 8679 } 8680 } 8681 return prot; 8682 } 8683 8684 /* Translate section/page access permissions to protection flags 8685 * 8686 * @env: CPUARMState 8687 * @mmu_idx: MMU index indicating required translation regime 8688 * @is_aa64: TRUE if AArch64 8689 * @ap: The 2-bit simple AP (AP[2:1]) 8690 * @ns: NS (non-secure) bit 8691 * @xn: XN (execute-never) bit 8692 * @pxn: PXN (privileged execute-never) bit 8693 */ 8694 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, 8695 int ap, int ns, int xn, int pxn) 8696 { 8697 bool is_user = regime_is_user(env, mmu_idx); 8698 int prot_rw, user_rw; 8699 bool have_wxn; 8700 int wxn = 0; 8701 8702 assert(mmu_idx != ARMMMUIdx_S2NS); 8703 8704 user_rw = simple_ap_to_rw_prot_is_user(ap, true); 8705 if (is_user) { 8706 prot_rw = user_rw; 8707 } else { 8708 prot_rw = simple_ap_to_rw_prot_is_user(ap, false); 8709 } 8710 8711 if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) { 8712 return prot_rw; 8713 } 8714 8715 /* TODO have_wxn should be replaced with 8716 * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2) 8717 * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE 8718 * compatible processors have EL2, which is required for [U]WXN. 
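 *
 * The net effect below: with SCTLR.WXN set, any writable mapping loses
 * PAGE_EXEC; with SCTLR.UWXN set (AArch32 privileged regimes), a mapping
 * writable from PL0 is not executable at PL1.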
8719 */ 8720 have_wxn = arm_feature(env, ARM_FEATURE_LPAE); 8721 8722 if (have_wxn) { 8723 wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN; 8724 } 8725 8726 if (is_aa64) { 8727 switch (regime_el(env, mmu_idx)) { 8728 case 1: 8729 if (!is_user) { 8730 xn = pxn || (user_rw & PAGE_WRITE); 8731 } 8732 break; 8733 case 2: 8734 case 3: 8735 break; 8736 } 8737 } else if (arm_feature(env, ARM_FEATURE_V7)) { 8738 switch (regime_el(env, mmu_idx)) { 8739 case 1: 8740 case 3: 8741 if (is_user) { 8742 xn = xn || !(user_rw & PAGE_READ); 8743 } else { 8744 int uwxn = 0; 8745 if (have_wxn) { 8746 uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN; 8747 } 8748 xn = xn || !(prot_rw & PAGE_READ) || pxn || 8749 (uwxn && (user_rw & PAGE_WRITE)); 8750 } 8751 break; 8752 case 2: 8753 break; 8754 } 8755 } else { 8756 xn = wxn = 0; 8757 } 8758 8759 if (xn || (wxn && (prot_rw & PAGE_WRITE))) { 8760 return prot_rw; 8761 } 8762 return prot_rw | PAGE_EXEC; 8763 } 8764 8765 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, 8766 uint32_t *table, uint32_t address) 8767 { 8768 /* Note that we can only get here for an AArch32 PL0/PL1 lookup */ 8769 TCR *tcr = regime_tcr(env, mmu_idx); 8770 8771 if (address & tcr->mask) { 8772 if (tcr->raw_tcr & TTBCR_PD1) { 8773 /* Translation table walk disabled for TTBR1 */ 8774 return false; 8775 } 8776 *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000; 8777 } else { 8778 if (tcr->raw_tcr & TTBCR_PD0) { 8779 /* Translation table walk disabled for TTBR0 */ 8780 return false; 8781 } 8782 *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask; 8783 } 8784 *table |= (address >> 18) & 0x3ffc; 8785 return true; 8786 } 8787 8788 /* Translate a S1 pagetable walk through S2 if needed. */ 8789 static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx, 8790 hwaddr addr, MemTxAttrs txattrs, 8791 ARMMMUFaultInfo *fi) 8792 { 8793 if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) && 8794 !regime_translation_disabled(env, ARMMMUIdx_S2NS)) { 8795 target_ulong s2size; 8796 hwaddr s2pa; 8797 int s2prot; 8798 int ret; 8799 8800 ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa, 8801 &txattrs, &s2prot, &s2size, fi, NULL); 8802 if (ret) { 8803 assert(fi->type != ARMFault_None); 8804 fi->s2addr = addr; 8805 fi->stage2 = true; 8806 fi->s1ptw = true; 8807 return ~0; 8808 } 8809 addr = s2pa; 8810 } 8811 return addr; 8812 } 8813 8814 /* All loads done in the course of a page table walk go through here. 
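 *
 * They translate the descriptor address through stage 2 where required
 * (S1_ptw_translate), use the regime's SCTLR.EE setting to pick the load
 * endianness, and report external aborts on the walk through *fi as
 * ARMFault_SyncExternalOnWalk.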
*/ 8815 static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure, 8816 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) 8817 { 8818 ARMCPU *cpu = ARM_CPU(cs); 8819 CPUARMState *env = &cpu->env; 8820 MemTxAttrs attrs = {}; 8821 MemTxResult result = MEMTX_OK; 8822 AddressSpace *as; 8823 uint32_t data; 8824 8825 attrs.secure = is_secure; 8826 as = arm_addressspace(cs, attrs); 8827 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); 8828 if (fi->s1ptw) { 8829 return 0; 8830 } 8831 if (regime_translation_big_endian(env, mmu_idx)) { 8832 data = address_space_ldl_be(as, addr, attrs, &result); 8833 } else { 8834 data = address_space_ldl_le(as, addr, attrs, &result); 8835 } 8836 if (result == MEMTX_OK) { 8837 return data; 8838 } 8839 fi->type = ARMFault_SyncExternalOnWalk; 8840 fi->ea = arm_extabort_type(result); 8841 return 0; 8842 } 8843 8844 static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure, 8845 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) 8846 { 8847 ARMCPU *cpu = ARM_CPU(cs); 8848 CPUARMState *env = &cpu->env; 8849 MemTxAttrs attrs = {}; 8850 MemTxResult result = MEMTX_OK; 8851 AddressSpace *as; 8852 uint64_t data; 8853 8854 attrs.secure = is_secure; 8855 as = arm_addressspace(cs, attrs); 8856 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); 8857 if (fi->s1ptw) { 8858 return 0; 8859 } 8860 if (regime_translation_big_endian(env, mmu_idx)) { 8861 data = address_space_ldq_be(as, addr, attrs, &result); 8862 } else { 8863 data = address_space_ldq_le(as, addr, attrs, &result); 8864 } 8865 if (result == MEMTX_OK) { 8866 return data; 8867 } 8868 fi->type = ARMFault_SyncExternalOnWalk; 8869 fi->ea = arm_extabort_type(result); 8870 return 0; 8871 } 8872 8873 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address, 8874 MMUAccessType access_type, ARMMMUIdx mmu_idx, 8875 hwaddr *phys_ptr, int *prot, 8876 target_ulong *page_size, 8877 ARMMMUFaultInfo *fi) 8878 { 8879 CPUState *cs = CPU(arm_env_get_cpu(env)); 8880 int level = 1; 8881 uint32_t table; 8882 uint32_t desc; 8883 int type; 8884 int ap; 8885 int domain = 0; 8886 int domain_prot; 8887 hwaddr phys_addr; 8888 uint32_t dacr; 8889 8890 /* Pagetable walk. */ 8891 /* Lookup l1 descriptor. */ 8892 if (!get_level1_table_address(env, mmu_idx, &table, address)) { 8893 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 8894 fi->type = ARMFault_Translation; 8895 goto do_fault; 8896 } 8897 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 8898 mmu_idx, fi); 8899 if (fi->type != ARMFault_None) { 8900 goto do_fault; 8901 } 8902 type = (desc & 3); 8903 domain = (desc >> 5) & 0x0f; 8904 if (regime_el(env, mmu_idx) == 1) { 8905 dacr = env->cp15.dacr_ns; 8906 } else { 8907 dacr = env->cp15.dacr_s; 8908 } 8909 domain_prot = (dacr >> (domain * 2)) & 3; 8910 if (type == 0) { 8911 /* Section translation fault. */ 8912 fi->type = ARMFault_Translation; 8913 goto do_fault; 8914 } 8915 if (type != 2) { 8916 level = 2; 8917 } 8918 if (domain_prot == 0 || domain_prot == 2) { 8919 fi->type = ARMFault_Domain; 8920 goto do_fault; 8921 } 8922 if (type == 2) { 8923 /* 1Mb section. */ 8924 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 8925 ap = (desc >> 10) & 3; 8926 *page_size = 1024 * 1024; 8927 } else { 8928 /* Lookup l2 entry. */ 8929 if (type == 1) { 8930 /* Coarse pagetable. */ 8931 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 8932 } else { 8933 /* Fine pagetable. 
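 *
 * A fine second-level table has 1024 entries indexed by VA[19:10]
 * (1KB granularity), compared with the 256 entries indexed by VA[19:12]
 * of the coarse table case above.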
*/ 8934 table = (desc & 0xfffff000) | ((address >> 8) & 0xffc); 8935 } 8936 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 8937 mmu_idx, fi); 8938 if (fi->type != ARMFault_None) { 8939 goto do_fault; 8940 } 8941 switch (desc & 3) { 8942 case 0: /* Page translation fault. */ 8943 fi->type = ARMFault_Translation; 8944 goto do_fault; 8945 case 1: /* 64k page. */ 8946 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 8947 ap = (desc >> (4 + ((address >> 13) & 6))) & 3; 8948 *page_size = 0x10000; 8949 break; 8950 case 2: /* 4k page. */ 8951 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 8952 ap = (desc >> (4 + ((address >> 9) & 6))) & 3; 8953 *page_size = 0x1000; 8954 break; 8955 case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */ 8956 if (type == 1) { 8957 /* ARMv6/XScale extended small page format */ 8958 if (arm_feature(env, ARM_FEATURE_XSCALE) 8959 || arm_feature(env, ARM_FEATURE_V6)) { 8960 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 8961 *page_size = 0x1000; 8962 } else { 8963 /* UNPREDICTABLE in ARMv5; we choose to take a 8964 * page translation fault. 8965 */ 8966 fi->type = ARMFault_Translation; 8967 goto do_fault; 8968 } 8969 } else { 8970 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff); 8971 *page_size = 0x400; 8972 } 8973 ap = (desc >> 4) & 3; 8974 break; 8975 default: 8976 /* Never happens, but compiler isn't smart enough to tell. */ 8977 abort(); 8978 } 8979 } 8980 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 8981 *prot |= *prot ? PAGE_EXEC : 0; 8982 if (!(*prot & (1 << access_type))) { 8983 /* Access permission fault. */ 8984 fi->type = ARMFault_Permission; 8985 goto do_fault; 8986 } 8987 *phys_ptr = phys_addr; 8988 return false; 8989 do_fault: 8990 fi->domain = domain; 8991 fi->level = level; 8992 return true; 8993 } 8994 8995 static bool get_phys_addr_v6(CPUARMState *env, uint32_t address, 8996 MMUAccessType access_type, ARMMMUIdx mmu_idx, 8997 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 8998 target_ulong *page_size, ARMMMUFaultInfo *fi) 8999 { 9000 CPUState *cs = CPU(arm_env_get_cpu(env)); 9001 int level = 1; 9002 uint32_t table; 9003 uint32_t desc; 9004 uint32_t xn; 9005 uint32_t pxn = 0; 9006 int type; 9007 int ap; 9008 int domain = 0; 9009 int domain_prot; 9010 hwaddr phys_addr; 9011 uint32_t dacr; 9012 bool ns; 9013 9014 /* Pagetable walk. */ 9015 /* Lookup l1 descriptor. */ 9016 if (!get_level1_table_address(env, mmu_idx, &table, address)) { 9017 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 9018 fi->type = ARMFault_Translation; 9019 goto do_fault; 9020 } 9021 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 9022 mmu_idx, fi); 9023 if (fi->type != ARMFault_None) { 9024 goto do_fault; 9025 } 9026 type = (desc & 3); 9027 if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) { 9028 /* Section translation fault, or attempt to use the encoding 9029 * which is Reserved on implementations without PXN. 9030 */ 9031 fi->type = ARMFault_Translation; 9032 goto do_fault; 9033 } 9034 if ((type == 1) || !(desc & (1 << 18))) { 9035 /* Page or Section. 
*/ 9036 domain = (desc >> 5) & 0x0f; 9037 } 9038 if (regime_el(env, mmu_idx) == 1) { 9039 dacr = env->cp15.dacr_ns; 9040 } else { 9041 dacr = env->cp15.dacr_s; 9042 } 9043 if (type == 1) { 9044 level = 2; 9045 } 9046 domain_prot = (dacr >> (domain * 2)) & 3; 9047 if (domain_prot == 0 || domain_prot == 2) { 9048 /* Section or Page domain fault */ 9049 fi->type = ARMFault_Domain; 9050 goto do_fault; 9051 } 9052 if (type != 1) { 9053 if (desc & (1 << 18)) { 9054 /* Supersection. */ 9055 phys_addr = (desc & 0xff000000) | (address & 0x00ffffff); 9056 phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32; 9057 phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36; 9058 *page_size = 0x1000000; 9059 } else { 9060 /* Section. */ 9061 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 9062 *page_size = 0x100000; 9063 } 9064 ap = ((desc >> 10) & 3) | ((desc >> 13) & 4); 9065 xn = desc & (1 << 4); 9066 pxn = desc & 1; 9067 ns = extract32(desc, 19, 1); 9068 } else { 9069 if (arm_feature(env, ARM_FEATURE_PXN)) { 9070 pxn = (desc >> 2) & 1; 9071 } 9072 ns = extract32(desc, 3, 1); 9073 /* Lookup l2 entry. */ 9074 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 9075 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 9076 mmu_idx, fi); 9077 if (fi->type != ARMFault_None) { 9078 goto do_fault; 9079 } 9080 ap = ((desc >> 4) & 3) | ((desc >> 7) & 4); 9081 switch (desc & 3) { 9082 case 0: /* Page translation fault. */ 9083 fi->type = ARMFault_Translation; 9084 goto do_fault; 9085 case 1: /* 64k page. */ 9086 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 9087 xn = desc & (1 << 15); 9088 *page_size = 0x10000; 9089 break; 9090 case 2: case 3: /* 4k page. */ 9091 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 9092 xn = desc & 1; 9093 *page_size = 0x1000; 9094 break; 9095 default: 9096 /* Never happens, but compiler isn't smart enough to tell. */ 9097 abort(); 9098 } 9099 } 9100 if (domain_prot == 3) { 9101 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 9102 } else { 9103 if (pxn && !regime_is_user(env, mmu_idx)) { 9104 xn = 1; 9105 } 9106 if (xn && access_type == MMU_INST_FETCH) { 9107 fi->type = ARMFault_Permission; 9108 goto do_fault; 9109 } 9110 9111 if (arm_feature(env, ARM_FEATURE_V6K) && 9112 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) { 9113 /* The simplified model uses AP[0] as an access control bit. */ 9114 if ((ap & 1) == 0) { 9115 /* Access flag fault. */ 9116 fi->type = ARMFault_AccessFlag; 9117 goto do_fault; 9118 } 9119 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1); 9120 } else { 9121 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 9122 } 9123 if (*prot && !xn) { 9124 *prot |= PAGE_EXEC; 9125 } 9126 if (!(*prot & (1 << access_type))) { 9127 /* Access permission fault. */ 9128 fi->type = ARMFault_Permission; 9129 goto do_fault; 9130 } 9131 } 9132 if (ns) { 9133 /* The NS bit will (as required by the architecture) have no effect if 9134 * the CPU doesn't support TZ or this is a non-secure translation 9135 * regime, because the attribute will already be non-secure. 
9136 */ 9137 attrs->secure = false; 9138 } 9139 *phys_ptr = phys_addr; 9140 return false; 9141 do_fault: 9142 fi->domain = domain; 9143 fi->level = level; 9144 return true; 9145 } 9146 9147 /* 9148 * check_s2_mmu_setup 9149 * @cpu: ARMCPU 9150 * @is_aa64: True if the translation regime is in AArch64 state 9151 * @startlevel: Suggested starting level 9152 * @inputsize: Bitsize of IPAs 9153 * @stride: Page-table stride (See the ARM ARM) 9154 * 9155 * Returns true if the suggested S2 translation parameters are OK and 9156 * false otherwise. 9157 */ 9158 static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level, 9159 int inputsize, int stride) 9160 { 9161 const int grainsize = stride + 3; 9162 int startsizecheck; 9163 9164 /* Negative levels are never allowed. */ 9165 if (level < 0) { 9166 return false; 9167 } 9168 9169 startsizecheck = inputsize - ((3 - level) * stride + grainsize); 9170 if (startsizecheck < 1 || startsizecheck > stride + 4) { 9171 return false; 9172 } 9173 9174 if (is_aa64) { 9175 CPUARMState *env = &cpu->env; 9176 unsigned int pamax = arm_pamax(cpu); 9177 9178 switch (stride) { 9179 case 13: /* 64KB Pages. */ 9180 if (level == 0 || (level == 1 && pamax <= 42)) { 9181 return false; 9182 } 9183 break; 9184 case 11: /* 16KB Pages. */ 9185 if (level == 0 || (level == 1 && pamax <= 40)) { 9186 return false; 9187 } 9188 break; 9189 case 9: /* 4KB Pages. */ 9190 if (level == 0 && pamax <= 42) { 9191 return false; 9192 } 9193 break; 9194 default: 9195 g_assert_not_reached(); 9196 } 9197 9198 /* Inputsize checks. */ 9199 if (inputsize > pamax && 9200 (arm_el_is_aa64(env, 1) || inputsize > 40)) { 9201 /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */ 9202 return false; 9203 } 9204 } else { 9205 /* AArch32 only supports 4KB pages. Assert on that. */ 9206 assert(stride == 9); 9207 9208 if (level == 0) { 9209 return false; 9210 } 9211 } 9212 return true; 9213 } 9214 9215 /* Translate from the 4-bit stage 2 representation of 9216 * memory attributes (without cache-allocation hints) to 9217 * the 8-bit representation of the stage 1 MAIR registers 9218 * (which includes allocation hints). 9219 * 9220 * ref: shared/translation/attrs/S2AttrDecode() 9221 * .../S2ConvertAttrsHints() 9222 */ 9223 static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs) 9224 { 9225 uint8_t hiattr = extract32(s2attrs, 2, 2); 9226 uint8_t loattr = extract32(s2attrs, 0, 2); 9227 uint8_t hihint = 0, lohint = 0; 9228 9229 if (hiattr != 0) { /* normal memory */ 9230 if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */ 9231 hiattr = loattr = 1; /* non-cacheable */ 9232 } else { 9233 if (hiattr != 1) { /* Write-through or write-back */ 9234 hihint = 3; /* RW allocate */ 9235 } 9236 if (loattr != 1) { /* Write-through or write-back */ 9237 lohint = 3; /* RW allocate */ 9238 } 9239 } 9240 } 9241 9242 return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint; 9243 } 9244 9245 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, 9246 MMUAccessType access_type, ARMMMUIdx mmu_idx, 9247 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, 9248 target_ulong *page_size_ptr, 9249 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) 9250 { 9251 ARMCPU *cpu = arm_env_get_cpu(env); 9252 CPUState *cs = CPU(cpu); 9253 /* Read an LPAE long-descriptor translation table. 
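 *
 * Roughly: choose TTBR0 or TTBR1 from the T0SZ/T1SZ address split, derive
 * the granule (stride) and starting level, then walk down the levels,
 * accumulating the NSTable/APTable/XNTable attributes, until a block or
 * page descriptor supplies the output address, permissions and memory
 * attributes.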
*/ 9254 ARMFaultType fault_type = ARMFault_Translation; 9255 uint32_t level; 9256 uint32_t epd = 0; 9257 int32_t t0sz, t1sz; 9258 uint32_t tg; 9259 uint64_t ttbr; 9260 int ttbr_select; 9261 hwaddr descaddr, indexmask, indexmask_grainsize; 9262 uint32_t tableattrs; 9263 target_ulong page_size; 9264 uint32_t attrs; 9265 int32_t stride = 9; 9266 int32_t addrsize; 9267 int inputsize; 9268 int32_t tbi = 0; 9269 TCR *tcr = regime_tcr(env, mmu_idx); 9270 int ap, ns, xn, pxn; 9271 uint32_t el = regime_el(env, mmu_idx); 9272 bool ttbr1_valid = true; 9273 uint64_t descaddrmask; 9274 bool aarch64 = arm_el_is_aa64(env, el); 9275 9276 /* TODO: 9277 * This code does not handle the different format TCR for VTCR_EL2. 9278 * This code also does not support shareability levels. 9279 * Attribute and permission bit handling should also be checked when adding 9280 * support for those page table walks. 9281 */ 9282 if (aarch64) { 9283 level = 0; 9284 addrsize = 64; 9285 if (el > 1) { 9286 if (mmu_idx != ARMMMUIdx_S2NS) { 9287 tbi = extract64(tcr->raw_tcr, 20, 1); 9288 } 9289 } else { 9290 if (extract64(address, 55, 1)) { 9291 tbi = extract64(tcr->raw_tcr, 38, 1); 9292 } else { 9293 tbi = extract64(tcr->raw_tcr, 37, 1); 9294 } 9295 } 9296 tbi *= 8; 9297 9298 /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it 9299 * invalid. 9300 */ 9301 if (el > 1) { 9302 ttbr1_valid = false; 9303 } 9304 } else { 9305 level = 1; 9306 addrsize = 32; 9307 /* There is no TTBR1 for EL2 */ 9308 if (el == 2) { 9309 ttbr1_valid = false; 9310 } 9311 } 9312 9313 /* Determine whether this address is in the region controlled by 9314 * TTBR0 or TTBR1 (or if it is in neither region and should fault). 9315 * This is a Non-secure PL0/1 stage 1 translation, so controlled by 9316 * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32: 9317 */ 9318 if (aarch64) { 9319 /* AArch64 translation. */ 9320 t0sz = extract32(tcr->raw_tcr, 0, 6); 9321 t0sz = MIN(t0sz, 39); 9322 t0sz = MAX(t0sz, 16); 9323 } else if (mmu_idx != ARMMMUIdx_S2NS) { 9324 /* AArch32 stage 1 translation. */ 9325 t0sz = extract32(tcr->raw_tcr, 0, 3); 9326 } else { 9327 /* AArch32 stage 2 translation. */ 9328 bool sext = extract32(tcr->raw_tcr, 4, 1); 9329 bool sign = extract32(tcr->raw_tcr, 3, 1); 9330 /* Address size is 40-bit for a stage 2 translation, 9331 * and t0sz can be negative (from -8 to 7), 9332 * so we need to adjust it to use the TTBR selecting logic below. 9333 */ 9334 addrsize = 40; 9335 t0sz = sextract32(tcr->raw_tcr, 0, 4) + 8; 9336 9337 /* If the sign-extend bit is not the same as t0sz[3], the result 9338 * is unpredictable. Flag this as a guest error. 
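 *
 * As a worked example: VTCR.T0SZ is a 4-bit signed field, so sextract32()
 * yields -8..7 and the +8 adjustment above leaves t0sz in the range
 * 0..15, giving an IPA input size of 40 - t0sz, i.e. 25 to 40 bits.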
*/ 9339 if (sign != sext) { 9340 qemu_log_mask(LOG_GUEST_ERROR, 9341 "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n"); 9342 } 9343 } 9344 t1sz = extract32(tcr->raw_tcr, 16, 6); 9345 if (aarch64) { 9346 t1sz = MIN(t1sz, 39); 9347 t1sz = MAX(t1sz, 16); 9348 } 9349 if (t0sz && !extract64(address, addrsize - t0sz, t0sz - tbi)) { 9350 /* there is a ttbr0 region and we are in it (high bits all zero) */ 9351 ttbr_select = 0; 9352 } else if (ttbr1_valid && t1sz && 9353 !extract64(~address, addrsize - t1sz, t1sz - tbi)) { 9354 /* there is a ttbr1 region and we are in it (high bits all one) */ 9355 ttbr_select = 1; 9356 } else if (!t0sz) { 9357 /* ttbr0 region is "everything not in the ttbr1 region" */ 9358 ttbr_select = 0; 9359 } else if (!t1sz && ttbr1_valid) { 9360 /* ttbr1 region is "everything not in the ttbr0 region" */ 9361 ttbr_select = 1; 9362 } else { 9363 /* in the gap between the two regions, this is a Translation fault */ 9364 fault_type = ARMFault_Translation; 9365 goto do_fault; 9366 } 9367 9368 /* Note that QEMU ignores shareability and cacheability attributes, 9369 * so we don't need to do anything with the SH, ORGN, IRGN fields 9370 * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the 9371 * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently 9372 * implement any ASID-like capability so we can ignore it (instead 9373 * we will always flush the TLB any time the ASID is changed). 9374 */ 9375 if (ttbr_select == 0) { 9376 ttbr = regime_ttbr(env, mmu_idx, 0); 9377 if (el < 2) { 9378 epd = extract32(tcr->raw_tcr, 7, 1); 9379 } 9380 inputsize = addrsize - t0sz; 9381 9382 tg = extract32(tcr->raw_tcr, 14, 2); 9383 if (tg == 1) { /* 64KB pages */ 9384 stride = 13; 9385 } 9386 if (tg == 2) { /* 16KB pages */ 9387 stride = 11; 9388 } 9389 } else { 9390 /* We should only be here if TTBR1 is valid */ 9391 assert(ttbr1_valid); 9392 9393 ttbr = regime_ttbr(env, mmu_idx, 1); 9394 epd = extract32(tcr->raw_tcr, 23, 1); 9395 inputsize = addrsize - t1sz; 9396 9397 tg = extract32(tcr->raw_tcr, 30, 2); 9398 if (tg == 3) { /* 64KB pages */ 9399 stride = 13; 9400 } 9401 if (tg == 1) { /* 16KB pages */ 9402 stride = 11; 9403 } 9404 } 9405 9406 /* Here we should have set up all the parameters for the translation: 9407 * inputsize, ttbr, epd, stride, tbi 9408 */ 9409 9410 if (epd) { 9411 /* Translation table walk disabled => Translation fault on TLB miss 9412 * Note: This is always 0 on 64-bit EL2 and EL3. 9413 */ 9414 goto do_fault; 9415 } 9416 9417 if (mmu_idx != ARMMMUIdx_S2NS) { 9418 /* The starting level depends on the virtual address size (which can 9419 * be up to 48 bits) and the translation granule size. It indicates 9420 * the number of strides (stride bits at a time) needed to 9421 * consume the bits of the input address. In the pseudocode this is: 9422 * level = 4 - RoundUp((inputsize - grainsize) / stride) 9423 * where their 'inputsize' is our 'inputsize', 'grainsize' is 9424 * our 'stride + 3' and 'stride' is our 'stride'. 
9425 * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying: 9426 * = 4 - (inputsize - stride - 3 + stride - 1) / stride 9427 * = 4 - (inputsize - 4) / stride; 9428 */ 9429 level = 4 - (inputsize - 4) / stride; 9430 } else { 9431 /* For stage 2 translations the starting level is specified by the 9432 * VTCR_EL2.SL0 field (whose interpretation depends on the page size) 9433 */ 9434 uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2); 9435 uint32_t startlevel; 9436 bool ok; 9437 9438 if (!aarch64 || stride == 9) { 9439 /* AArch32 or 4KB pages */ 9440 startlevel = 2 - sl0; 9441 } else { 9442 /* 16KB or 64KB pages */ 9443 startlevel = 3 - sl0; 9444 } 9445 9446 /* Check that the starting level is valid. */ 9447 ok = check_s2_mmu_setup(cpu, aarch64, startlevel, 9448 inputsize, stride); 9449 if (!ok) { 9450 fault_type = ARMFault_Translation; 9451 goto do_fault; 9452 } 9453 level = startlevel; 9454 } 9455 9456 indexmask_grainsize = (1ULL << (stride + 3)) - 1; 9457 indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1; 9458 9459 /* Now we can extract the actual base address from the TTBR */ 9460 descaddr = extract64(ttbr, 0, 48); 9461 descaddr &= ~indexmask; 9462 9463 /* The address field in the descriptor goes up to bit 39 for ARMv7 9464 * but up to bit 47 for ARMv8, but we use the descaddrmask 9465 * up to bit 39 for AArch32, because we don't need other bits in that case 9466 * to construct next descriptor address (anyway they should be all zeroes). 9467 */ 9468 descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) & 9469 ~indexmask_grainsize; 9470 9471 /* Secure accesses start with the page table in secure memory and 9472 * can be downgraded to non-secure at any step. Non-secure accesses 9473 * remain non-secure. We implement this by just ORing in the NSTable/NS 9474 * bits at each step. 9475 */ 9476 tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4); 9477 for (;;) { 9478 uint64_t descriptor; 9479 bool nstable; 9480 9481 descaddr |= (address >> (stride * (4 - level))) & indexmask; 9482 descaddr &= ~7ULL; 9483 nstable = extract32(tableattrs, 4, 1); 9484 descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi); 9485 if (fi->type != ARMFault_None) { 9486 goto do_fault; 9487 } 9488 9489 if (!(descriptor & 1) || 9490 (!(descriptor & 2) && (level == 3))) { 9491 /* Invalid, or the Reserved level 3 encoding */ 9492 goto do_fault; 9493 } 9494 descaddr = descriptor & descaddrmask; 9495 9496 if ((descriptor & 2) && (level < 3)) { 9497 /* Table entry. The top five bits are attributes which may 9498 * propagate down through lower levels of the table (and 9499 * which are all arranged so that 0 means "no effect", so 9500 * we can gather them up by ORing in the bits at each level). 9501 */ 9502 tableattrs |= extract64(descriptor, 59, 5); 9503 level++; 9504 indexmask = indexmask_grainsize; 9505 continue; 9506 } 9507 /* Block entry at level 1 or 2, or page entry at level 3. 9508 * These are basically the same thing, although the number 9509 * of bits we pull in from the vaddr varies. 
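 *
 * For example, with a 4KB granule (stride == 9) the computation below
 * yields a 4KB page at level 3, a 2MB block at level 2 and a 1GB block
 * at level 1; with a 64KB granule (stride == 13) it yields 64KB at
 * level 3 and 512MB at level 2.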
9510 */ 9511 page_size = (1ULL << ((stride * (4 - level)) + 3)); 9512 descaddr |= (address & (page_size - 1)); 9513 /* Extract attributes from the descriptor */ 9514 attrs = extract64(descriptor, 2, 10) 9515 | (extract64(descriptor, 52, 12) << 10); 9516 9517 if (mmu_idx == ARMMMUIdx_S2NS) { 9518 /* Stage 2 table descriptors do not include any attribute fields */ 9519 break; 9520 } 9521 /* Merge in attributes from table descriptors */ 9522 attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */ 9523 attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */ 9524 /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1 9525 * means "force PL1 access only", which means forcing AP[1] to 0. 9526 */ 9527 if (extract32(tableattrs, 2, 1)) { 9528 attrs &= ~(1 << 4); 9529 } 9530 attrs |= nstable << 3; /* NS */ 9531 break; 9532 } 9533 /* Here descaddr is the final physical address, and attributes 9534 * are all in attrs. 9535 */ 9536 fault_type = ARMFault_AccessFlag; 9537 if ((attrs & (1 << 8)) == 0) { 9538 /* Access flag */ 9539 goto do_fault; 9540 } 9541 9542 ap = extract32(attrs, 4, 2); 9543 xn = extract32(attrs, 12, 1); 9544 9545 if (mmu_idx == ARMMMUIdx_S2NS) { 9546 ns = true; 9547 *prot = get_S2prot(env, ap, xn); 9548 } else { 9549 ns = extract32(attrs, 3, 1); 9550 pxn = extract32(attrs, 11, 1); 9551 *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn); 9552 } 9553 9554 fault_type = ARMFault_Permission; 9555 if (!(*prot & (1 << access_type))) { 9556 goto do_fault; 9557 } 9558 9559 if (ns) { 9560 /* The NS bit will (as required by the architecture) have no effect if 9561 * the CPU doesn't support TZ or this is a non-secure translation 9562 * regime, because the attribute will already be non-secure. 9563 */ 9564 txattrs->secure = false; 9565 } 9566 9567 if (cacheattrs != NULL) { 9568 if (mmu_idx == ARMMMUIdx_S2NS) { 9569 cacheattrs->attrs = convert_stage2_attrs(env, 9570 extract32(attrs, 0, 4)); 9571 } else { 9572 /* Index into MAIR registers for cache attributes */ 9573 uint8_t attrindx = extract32(attrs, 0, 3); 9574 uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; 9575 assert(attrindx <= 7); 9576 cacheattrs->attrs = extract64(mair, attrindx * 8, 8); 9577 } 9578 cacheattrs->shareability = extract32(attrs, 6, 2); 9579 } 9580 9581 *phys_ptr = descaddr; 9582 *page_size_ptr = page_size; 9583 return false; 9584 9585 do_fault: 9586 fi->type = fault_type; 9587 fi->level = level; 9588 /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */ 9589 fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS); 9590 return true; 9591 } 9592 9593 static inline void get_phys_addr_pmsav7_default(CPUARMState *env, 9594 ARMMMUIdx mmu_idx, 9595 int32_t address, int *prot) 9596 { 9597 if (!arm_feature(env, ARM_FEATURE_M)) { 9598 *prot = PAGE_READ | PAGE_WRITE; 9599 switch (address) { 9600 case 0xF0000000 ... 0xFFFFFFFF: 9601 if (regime_sctlr(env, mmu_idx) & SCTLR_V) { 9602 /* hivecs execing is ok */ 9603 *prot |= PAGE_EXEC; 9604 } 9605 break; 9606 case 0x00000000 ... 0x7FFFFFFF: 9607 *prot |= PAGE_EXEC; 9608 break; 9609 } 9610 } else { 9611 /* Default system address map for M profile cores. 9612 * The architecture specifies which regions are execute-never; 9613 * at the MPU level no other checks are defined. 9614 */ 9615 switch (address) { 9616 case 0x00000000 ... 0x1fffffff: /* ROM */ 9617 case 0x20000000 ... 0x3fffffff: /* SRAM */ 9618 case 0x60000000 ... 0x7fffffff: /* RAM */ 9619 case 0x80000000 ... 
0x9fffffff: /* RAM */ 9620 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 9621 break; 9622 case 0x40000000 ... 0x5fffffff: /* Peripheral */ 9623 case 0xa0000000 ... 0xbfffffff: /* Device */ 9624 case 0xc0000000 ... 0xdfffffff: /* Device */ 9625 case 0xe0000000 ... 0xffffffff: /* System */ 9626 *prot = PAGE_READ | PAGE_WRITE; 9627 break; 9628 default: 9629 g_assert_not_reached(); 9630 } 9631 } 9632 } 9633 9634 static bool pmsav7_use_background_region(ARMCPU *cpu, 9635 ARMMMUIdx mmu_idx, bool is_user) 9636 { 9637 /* Return true if we should use the default memory map as a 9638 * "background" region if there are no hits against any MPU regions. 9639 */ 9640 CPUARMState *env = &cpu->env; 9641 9642 if (is_user) { 9643 return false; 9644 } 9645 9646 if (arm_feature(env, ARM_FEATURE_M)) { 9647 return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] 9648 & R_V7M_MPU_CTRL_PRIVDEFENA_MASK; 9649 } else { 9650 return regime_sctlr(env, mmu_idx) & SCTLR_BR; 9651 } 9652 } 9653 9654 static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address) 9655 { 9656 /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */ 9657 return arm_feature(env, ARM_FEATURE_M) && 9658 extract32(address, 20, 12) == 0xe00; 9659 } 9660 9661 static inline bool m_is_system_region(CPUARMState *env, uint32_t address) 9662 { 9663 /* True if address is in the M profile system region 9664 * 0xe0000000 - 0xffffffff 9665 */ 9666 return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7; 9667 } 9668 9669 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, 9670 MMUAccessType access_type, ARMMMUIdx mmu_idx, 9671 hwaddr *phys_ptr, int *prot, 9672 target_ulong *page_size, 9673 ARMMMUFaultInfo *fi) 9674 { 9675 ARMCPU *cpu = arm_env_get_cpu(env); 9676 int n; 9677 bool is_user = regime_is_user(env, mmu_idx); 9678 9679 *phys_ptr = address; 9680 *page_size = TARGET_PAGE_SIZE; 9681 *prot = 0; 9682 9683 if (regime_translation_disabled(env, mmu_idx) || 9684 m_is_ppb_region(env, address)) { 9685 /* MPU disabled or M profile PPB access: use default memory map. 9686 * The other case which uses the default memory map in the 9687 * v7M ARM ARM pseudocode is exception vector reads from the vector 9688 * table. In QEMU those accesses are done in arm_v7m_load_vector(), 9689 * which always does a direct read using address_space_ldl(), rather 9690 * than going via this function, so we don't need to check that here. 9691 */ 9692 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 9693 } else { /* MPU enabled */ 9694 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { 9695 /* region search */ 9696 uint32_t base = env->pmsav7.drbar[n]; 9697 uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5); 9698 uint32_t rmask; 9699 bool srdis = false; 9700 9701 if (!(env->pmsav7.drsr[n] & 0x1)) { 9702 continue; 9703 } 9704 9705 if (!rsize) { 9706 qemu_log_mask(LOG_GUEST_ERROR, 9707 "DRSR[%d]: Rsize field cannot be 0\n", n); 9708 continue; 9709 } 9710 rsize++; 9711 rmask = (1ull << rsize) - 1; 9712 9713 if (base & rmask) { 9714 qemu_log_mask(LOG_GUEST_ERROR, 9715 "DRBAR[%d]: 0x%" PRIx32 " misaligned " 9716 "to DRSR region size, mask = 0x%" PRIx32 "\n", 9717 n, base, rmask); 9718 continue; 9719 } 9720 9721 if (address < base || address > base + rmask) { 9722 /* 9723 * Address not in this region. We must check whether the 9724 * region covers addresses in the same page as our address. 
9725 * In that case we must not report a size that covers the 9726 * whole page for a subsequent hit against a different MPU 9727 * region or the background region, because it would result in 9728 * incorrect TLB hits for subsequent accesses to addresses that 9729 * are in this MPU region. 9730 */ 9731 if (ranges_overlap(base, rmask, 9732 address & TARGET_PAGE_MASK, 9733 TARGET_PAGE_SIZE)) { 9734 *page_size = 1; 9735 } 9736 continue; 9737 } 9738 9739 /* Region matched */ 9740 9741 if (rsize >= 8) { /* no subregions for regions < 256 bytes */ 9742 int i, snd; 9743 uint32_t srdis_mask; 9744 9745 rsize -= 3; /* sub region size (power of 2) */ 9746 snd = ((address - base) >> rsize) & 0x7; 9747 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1); 9748 9749 srdis_mask = srdis ? 0x3 : 0x0; 9750 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) { 9751 /* This will check in groups of 2, 4 and then 8, whether 9752 * the subregion bits are consistent. rsize is incremented 9753 * back up to give the region size, considering consistent 9754 * adjacent subregions as one region. Stop testing if rsize 9755 * is already big enough for an entire QEMU page. 9756 */ 9757 int snd_rounded = snd & ~(i - 1); 9758 uint32_t srdis_multi = extract32(env->pmsav7.drsr[n], 9759 snd_rounded + 8, i); 9760 if (srdis_mask ^ srdis_multi) { 9761 break; 9762 } 9763 srdis_mask = (srdis_mask << i) | srdis_mask; 9764 rsize++; 9765 } 9766 } 9767 if (srdis) { 9768 continue; 9769 } 9770 if (rsize < TARGET_PAGE_BITS) { 9771 *page_size = 1 << rsize; 9772 } 9773 break; 9774 } 9775 9776 if (n == -1) { /* no hits */ 9777 if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) { 9778 /* background fault */ 9779 fi->type = ARMFault_Background; 9780 return true; 9781 } 9782 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 9783 } else { /* a MPU hit! */ 9784 uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3); 9785 uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1); 9786 9787 if (m_is_system_region(env, address)) { 9788 /* System space is always execute never */ 9789 xn = 1; 9790 } 9791 9792 if (is_user) { /* User mode AP bit decoding */ 9793 switch (ap) { 9794 case 0: 9795 case 1: 9796 case 5: 9797 break; /* no access */ 9798 case 3: 9799 *prot |= PAGE_WRITE; 9800 /* fall through */ 9801 case 2: 9802 case 6: 9803 *prot |= PAGE_READ | PAGE_EXEC; 9804 break; 9805 case 7: 9806 /* for v7M, same as 6; for R profile a reserved value */ 9807 if (arm_feature(env, ARM_FEATURE_M)) { 9808 *prot |= PAGE_READ | PAGE_EXEC; 9809 break; 9810 } 9811 /* fall through */ 9812 default: 9813 qemu_log_mask(LOG_GUEST_ERROR, 9814 "DRACR[%d]: Bad value for AP bits: 0x%" 9815 PRIx32 "\n", n, ap); 9816 } 9817 } else { /* Priv. 
mode AP bits decoding */ 9818 switch (ap) { 9819 case 0: 9820 break; /* no access */ 9821 case 1: 9822 case 2: 9823 case 3: 9824 *prot |= PAGE_WRITE; 9825 /* fall through */ 9826 case 5: 9827 case 6: 9828 *prot |= PAGE_READ | PAGE_EXEC; 9829 break; 9830 case 7: 9831 /* for v7M, same as 6; for R profile a reserved value */ 9832 if (arm_feature(env, ARM_FEATURE_M)) { 9833 *prot |= PAGE_READ | PAGE_EXEC; 9834 break; 9835 } 9836 /* fall through */ 9837 default: 9838 qemu_log_mask(LOG_GUEST_ERROR, 9839 "DRACR[%d]: Bad value for AP bits: 0x%" 9840 PRIx32 "\n", n, ap); 9841 } 9842 } 9843 9844 /* execute never */ 9845 if (xn) { 9846 *prot &= ~PAGE_EXEC; 9847 } 9848 } 9849 } 9850 9851 fi->type = ARMFault_Permission; 9852 fi->level = 1; 9853 return !(*prot & (1 << access_type)); 9854 } 9855 9856 static bool v8m_is_sau_exempt(CPUARMState *env, 9857 uint32_t address, MMUAccessType access_type) 9858 { 9859 /* The architecture specifies that certain address ranges are 9860 * exempt from v8M SAU/IDAU checks. 9861 */ 9862 return 9863 (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) || 9864 (address >= 0xe0000000 && address <= 0xe0002fff) || 9865 (address >= 0xe000e000 && address <= 0xe000efff) || 9866 (address >= 0xe002e000 && address <= 0xe002efff) || 9867 (address >= 0xe0040000 && address <= 0xe0041fff) || 9868 (address >= 0xe00ff000 && address <= 0xe00fffff); 9869 } 9870 9871 static void v8m_security_lookup(CPUARMState *env, uint32_t address, 9872 MMUAccessType access_type, ARMMMUIdx mmu_idx, 9873 V8M_SAttributes *sattrs) 9874 { 9875 /* Look up the security attributes for this address. Compare the 9876 * pseudocode SecurityCheck() function. 9877 * We assume the caller has zero-initialized *sattrs. 9878 */ 9879 ARMCPU *cpu = arm_env_get_cpu(env); 9880 int r; 9881 bool idau_exempt = false, idau_ns = true, idau_nsc = true; 9882 int idau_region = IREGION_NOTVALID; 9883 uint32_t addr_page_base = address & TARGET_PAGE_MASK; 9884 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 9885 9886 if (cpu->idau) { 9887 IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau); 9888 IDAUInterface *ii = IDAU_INTERFACE(cpu->idau); 9889 9890 iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns, 9891 &idau_nsc); 9892 } 9893 9894 if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) { 9895 /* 0xf0000000..0xffffffff is always S for insn fetches */ 9896 return; 9897 } 9898 9899 if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) { 9900 sattrs->ns = !regime_is_secure(env, mmu_idx); 9901 return; 9902 } 9903 9904 if (idau_region != IREGION_NOTVALID) { 9905 sattrs->irvalid = true; 9906 sattrs->iregion = idau_region; 9907 } 9908 9909 switch (env->sau.ctrl & 3) { 9910 case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */ 9911 break; 9912 case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */ 9913 sattrs->ns = true; 9914 break; 9915 default: /* SAU.ENABLE == 1 */ 9916 for (r = 0; r < cpu->sau_sregion; r++) { 9917 if (env->sau.rlar[r] & 1) { 9918 uint32_t base = env->sau.rbar[r] & ~0x1f; 9919 uint32_t limit = env->sau.rlar[r] | 0x1f; 9920 9921 if (base <= address && limit >= address) { 9922 if (base > addr_page_base || limit < addr_page_limit) { 9923 sattrs->subpage = true; 9924 } 9925 if (sattrs->srvalid) { 9926 /* If we hit in more than one region then we must report 9927 * as Secure, not NS-Callable, with no valid region 9928 * number info. 
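 * (This matches the overlapping-region case of the v8M pseudocode
 * SecurityCheck() function referenced above.)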
9929 */ 9930 sattrs->ns = false; 9931 sattrs->nsc = false; 9932 sattrs->sregion = 0; 9933 sattrs->srvalid = false; 9934 break; 9935 } else { 9936 if (env->sau.rlar[r] & 2) { 9937 sattrs->nsc = true; 9938 } else { 9939 sattrs->ns = true; 9940 } 9941 sattrs->srvalid = true; 9942 sattrs->sregion = r; 9943 } 9944 } else { 9945 /* 9946 * Address not in this region. We must check whether the 9947 * region covers addresses in the same page as our address. 9948 * In that case we must not report a size that covers the 9949 * whole page for a subsequent hit against a different MPU 9950 * region or the background region, because it would result 9951 * in incorrect TLB hits for subsequent accesses to 9952 * addresses that are in this MPU region. 9953 */ 9954 if (limit >= base && 9955 ranges_overlap(base, limit - base + 1, 9956 addr_page_base, 9957 TARGET_PAGE_SIZE)) { 9958 sattrs->subpage = true; 9959 } 9960 } 9961 } 9962 } 9963 9964 /* The IDAU will override the SAU lookup results if it specifies 9965 * higher security than the SAU does. 9966 */ 9967 if (!idau_ns) { 9968 if (sattrs->ns || (!idau_nsc && sattrs->nsc)) { 9969 sattrs->ns = false; 9970 sattrs->nsc = idau_nsc; 9971 } 9972 } 9973 break; 9974 } 9975 } 9976 9977 static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, 9978 MMUAccessType access_type, ARMMMUIdx mmu_idx, 9979 hwaddr *phys_ptr, MemTxAttrs *txattrs, 9980 int *prot, bool *is_subpage, 9981 ARMMMUFaultInfo *fi, uint32_t *mregion) 9982 { 9983 /* Perform a PMSAv8 MPU lookup (without also doing the SAU check 9984 * that a full phys-to-virt translation does). 9985 * mregion is (if not NULL) set to the region number which matched, 9986 * or -1 if no region number is returned (MPU off, address did not 9987 * hit a region, address hit in multiple regions). 9988 * We set is_subpage to true if the region hit doesn't cover the 9989 * entire TARGET_PAGE the address is within. 9990 */ 9991 ARMCPU *cpu = arm_env_get_cpu(env); 9992 bool is_user = regime_is_user(env, mmu_idx); 9993 uint32_t secure = regime_is_secure(env, mmu_idx); 9994 int n; 9995 int matchregion = -1; 9996 bool hit = false; 9997 uint32_t addr_page_base = address & TARGET_PAGE_MASK; 9998 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 9999 10000 *is_subpage = false; 10001 *phys_ptr = address; 10002 *prot = 0; 10003 if (mregion) { 10004 *mregion = -1; 10005 } 10006 10007 /* Unlike the ARM ARM pseudocode, we don't need to check whether this 10008 * was an exception vector read from the vector table (which is always 10009 * done using the default system address map), because those accesses 10010 * are done in arm_v7m_load_vector(), which always does a direct 10011 * read using address_space_ldl(), rather than going via this function. 10012 */ 10013 if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */ 10014 hit = true; 10015 } else if (m_is_ppb_region(env, address)) { 10016 hit = true; 10017 } else if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) { 10018 hit = true; 10019 } else { 10020 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { 10021 /* region search */ 10022 /* Note that the base address is bits [31:5] from the register 10023 * with bits [4:0] all zeroes, but the limit address is bits 10024 * [31:5] from the register with bits [4:0] all ones. 
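 * For example, an RBAR value of 0x20000043 (the low bits hold the XN
 * and AP fields) gives a base of 0x20000040, and an RLAR value of
 * 0x200000e1 gives an inclusive limit of 0x200000ff.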
10025 */ 10026 uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f; 10027 uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f; 10028 10029 if (!(env->pmsav8.rlar[secure][n] & 0x1)) { 10030 /* Region disabled */ 10031 continue; 10032 } 10033 10034 if (address < base || address > limit) { 10035 /* 10036 * Address not in this region. We must check whether the 10037 * region covers addresses in the same page as our address. 10038 * In that case we must not report a size that covers the 10039 * whole page for a subsequent hit against a different MPU 10040 * region or the background region, because it would result in 10041 * incorrect TLB hits for subsequent accesses to addresses that 10042 * are in this MPU region. 10043 */ 10044 if (limit >= base && 10045 ranges_overlap(base, limit - base + 1, 10046 addr_page_base, 10047 TARGET_PAGE_SIZE)) { 10048 *is_subpage = true; 10049 } 10050 continue; 10051 } 10052 10053 if (base > addr_page_base || limit < addr_page_limit) { 10054 *is_subpage = true; 10055 } 10056 10057 if (hit) { 10058 /* Multiple regions match -- always a failure (unlike 10059 * PMSAv7 where highest-numbered-region wins) 10060 */ 10061 fi->type = ARMFault_Permission; 10062 fi->level = 1; 10063 return true; 10064 } 10065 10066 matchregion = n; 10067 hit = true; 10068 } 10069 } 10070 10071 if (!hit) { 10072 /* background fault */ 10073 fi->type = ARMFault_Background; 10074 return true; 10075 } 10076 10077 if (matchregion == -1) { 10078 /* hit using the background region */ 10079 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 10080 } else { 10081 uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2); 10082 uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1); 10083 10084 if (m_is_system_region(env, address)) { 10085 /* System space is always execute never */ 10086 xn = 1; 10087 } 10088 10089 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap); 10090 if (*prot && !xn) { 10091 *prot |= PAGE_EXEC; 10092 } 10093 /* We don't need to look the attribute up in the MAIR0/MAIR1 10094 * registers because that only tells us about cacheability. 10095 */ 10096 if (mregion) { 10097 *mregion = matchregion; 10098 } 10099 } 10100 10101 fi->type = ARMFault_Permission; 10102 fi->level = 1; 10103 return !(*prot & (1 << access_type)); 10104 } 10105 10106 10107 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, 10108 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10109 hwaddr *phys_ptr, MemTxAttrs *txattrs, 10110 int *prot, target_ulong *page_size, 10111 ARMMMUFaultInfo *fi) 10112 { 10113 uint32_t secure = regime_is_secure(env, mmu_idx); 10114 V8M_SAttributes sattrs = {}; 10115 bool ret; 10116 bool mpu_is_subpage; 10117 10118 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 10119 v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs); 10120 if (access_type == MMU_INST_FETCH) { 10121 /* Instruction fetches always use the MMU bank and the 10122 * transaction attribute determined by the fetch address, 10123 * regardless of CPU state. This is painful for QEMU 10124 * to handle, because it would mean we need to encode 10125 * into the mmu_idx not just the (user, negpri) information 10126 * for the current security state but also that for the 10127 * other security state, which would balloon the number 10128 * of mmu_idx values needed alarmingly. 
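 * (In effect we would need Secure and NonSecure variants of every
 * M-profile mmu_idx.)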
10129 * Fortunately we can avoid this because it's not actually 10130 * possible to arbitrarily execute code from memory with 10131 * the wrong security attribute: it will always generate 10132 * an exception of some kind or another, apart from the 10133 * special case of an NS CPU executing an SG instruction 10134 * in S&NSC memory. So we always just fail the translation 10135 * here and sort things out in the exception handler 10136 * (including possibly emulating an SG instruction). 10137 */ 10138 if (sattrs.ns != !secure) { 10139 if (sattrs.nsc) { 10140 fi->type = ARMFault_QEMU_NSCExec; 10141 } else { 10142 fi->type = ARMFault_QEMU_SFault; 10143 } 10144 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; 10145 *phys_ptr = address; 10146 *prot = 0; 10147 return true; 10148 } 10149 } else { 10150 /* For data accesses we always use the MMU bank indicated 10151 * by the current CPU state, but the security attributes 10152 * might downgrade a secure access to nonsecure. 10153 */ 10154 if (sattrs.ns) { 10155 txattrs->secure = false; 10156 } else if (!secure) { 10157 /* NS access to S memory must fault. 10158 * Architecturally we should first check whether the 10159 * MPU information for this address indicates that we 10160 * are doing an unaligned access to Device memory, which 10161 * should generate a UsageFault instead. QEMU does not 10162 * currently check for that kind of unaligned access though. 10163 * If we added it we would need to do so as a special case 10164 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt(). 10165 */ 10166 fi->type = ARMFault_QEMU_SFault; 10167 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; 10168 *phys_ptr = address; 10169 *prot = 0; 10170 return true; 10171 } 10172 } 10173 } 10174 10175 ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr, 10176 txattrs, prot, &mpu_is_subpage, fi, NULL); 10177 /* 10178 * TODO: this is a temporary hack to ignore the fact that the SAU region 10179 * is smaller than a page if this is an executable region. We never 10180 * supported small MPU regions, but we did (accidentally) allow small 10181 * SAU regions, and if we now made small SAU regions not be executable 10182 * then this would break previously working guest code. We can't 10183 * remove this until/unless we implement support for execution from 10184 * small regions. 10185 */ 10186 if (*prot & PAGE_EXEC) { 10187 sattrs.subpage = false; 10188 } 10189 *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE; 10190 return ret; 10191 } 10192 10193 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address, 10194 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10195 hwaddr *phys_ptr, int *prot, 10196 ARMMMUFaultInfo *fi) 10197 { 10198 int n; 10199 uint32_t mask; 10200 uint32_t base; 10201 bool is_user = regime_is_user(env, mmu_idx); 10202 10203 if (regime_translation_disabled(env, mmu_idx)) { 10204 /* MPU disabled. */ 10205 *phys_ptr = address; 10206 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 10207 return false; 10208 } 10209 10210 *phys_ptr = address; 10211 for (n = 7; n >= 0; n--) { 10212 base = env->cp15.c6_region[n]; 10213 if ((base & 1) == 0) { 10214 continue; 10215 } 10216 mask = 1 << ((base >> 1) & 0x1f); 10217 /* Keep this shift separate from the above to avoid an 10218 (undefined) << 32. 
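 * (With the maximum region size field of 31, a single combined shift
 * would be 1 << 32, which is undefined for a 32-bit value.)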
*/ 10219 mask = (mask << 1) - 1; 10220 if (((base ^ address) & ~mask) == 0) { 10221 break; 10222 } 10223 } 10224 if (n < 0) { 10225 fi->type = ARMFault_Background; 10226 return true; 10227 } 10228 10229 if (access_type == MMU_INST_FETCH) { 10230 mask = env->cp15.pmsav5_insn_ap; 10231 } else { 10232 mask = env->cp15.pmsav5_data_ap; 10233 } 10234 mask = (mask >> (n * 4)) & 0xf; 10235 switch (mask) { 10236 case 0: 10237 fi->type = ARMFault_Permission; 10238 fi->level = 1; 10239 return true; 10240 case 1: 10241 if (is_user) { 10242 fi->type = ARMFault_Permission; 10243 fi->level = 1; 10244 return true; 10245 } 10246 *prot = PAGE_READ | PAGE_WRITE; 10247 break; 10248 case 2: 10249 *prot = PAGE_READ; 10250 if (!is_user) { 10251 *prot |= PAGE_WRITE; 10252 } 10253 break; 10254 case 3: 10255 *prot = PAGE_READ | PAGE_WRITE; 10256 break; 10257 case 5: 10258 if (is_user) { 10259 fi->type = ARMFault_Permission; 10260 fi->level = 1; 10261 return true; 10262 } 10263 *prot = PAGE_READ; 10264 break; 10265 case 6: 10266 *prot = PAGE_READ; 10267 break; 10268 default: 10269 /* Bad permission. */ 10270 fi->type = ARMFault_Permission; 10271 fi->level = 1; 10272 return true; 10273 } 10274 *prot |= PAGE_EXEC; 10275 return false; 10276 } 10277 10278 /* Combine either inner or outer cacheability attributes for normal 10279 * memory, according to table D4-42 and pseudocode procedure 10280 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM). 10281 * 10282 * NB: only stage 1 includes allocation hints (RW bits), leading to 10283 * some asymmetry. 10284 */ 10285 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2) 10286 { 10287 if (s1 == 4 || s2 == 4) { 10288 /* non-cacheable has precedence */ 10289 return 4; 10290 } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) { 10291 /* stage 1 write-through takes precedence */ 10292 return s1; 10293 } else if (extract32(s2, 2, 2) == 2) { 10294 /* stage 2 write-through takes precedence, but the allocation hint 10295 * is still taken from stage 1 10296 */ 10297 return (2 << 2) | extract32(s1, 0, 2); 10298 } else { /* write-back */ 10299 return s1; 10300 } 10301 } 10302 10303 /* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4 10304 * and CombineS1S2Desc() 10305 * 10306 * @s1: Attributes from stage 1 walk 10307 * @s2: Attributes from stage 2 walk 10308 */ 10309 static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2) 10310 { 10311 uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4); 10312 uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4); 10313 ARMCacheAttrs ret; 10314 10315 /* Combine shareability attributes (table D4-43) */ 10316 if (s1.shareability == 2 || s2.shareability == 2) { 10317 /* if either are outer-shareable, the result is outer-shareable */ 10318 ret.shareability = 2; 10319 } else if (s1.shareability == 3 || s2.shareability == 3) { 10320 /* if either are inner-shareable, the result is inner-shareable */ 10321 ret.shareability = 3; 10322 } else { 10323 /* both non-shareable */ 10324 ret.shareability = 0; 10325 } 10326 10327 /* Combine memory type and cacheability attributes */ 10328 if (s1hi == 0 || s2hi == 0) { 10329 /* Device has precedence over normal */ 10330 if (s1lo == 0 || s2lo == 0) { 10331 /* nGnRnE has precedence over anything */ 10332 ret.attrs = 0; 10333 } else if (s1lo == 4 || s2lo == 4) { 10334 /* non-Reordering has precedence over Reordering */ 10335 ret.attrs = 4; /* nGnRE */ 10336 } else if (s1lo == 8 || s2lo == 8) { 10337 /* 
non-Gathering has precedence over Gathering */ 10338 ret.attrs = 8; /* nGRE */ 10339 } else { 10340 ret.attrs = 0xc; /* GRE */ 10341 } 10342 10343 /* Any location for which the resultant memory type is any 10344 * type of Device memory is always treated as Outer Shareable. 10345 */ 10346 ret.shareability = 2; 10347 } else { /* Normal memory */ 10348 /* Outer/inner cacheability combine independently */ 10349 ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4 10350 | combine_cacheattr_nibble(s1lo, s2lo); 10351 10352 if (ret.attrs == 0x44) { 10353 /* Any location for which the resultant memory type is Normal 10354 * Inner Non-cacheable, Outer Non-cacheable is always treated 10355 * as Outer Shareable. 10356 */ 10357 ret.shareability = 2; 10358 } 10359 } 10360 10361 return ret; 10362 } 10363 10364 10365 /* get_phys_addr - get the physical address for this virtual address 10366 * 10367 * Find the physical address corresponding to the given virtual address, 10368 * by doing a translation table walk on MMU based systems or using the 10369 * MPU state on MPU based systems. 10370 * 10371 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs, 10372 * prot and page_size may not be filled in, and the populated fsr value provides 10373 * information on why the translation aborted, in the format of a 10374 * DFSR/IFSR fault register, with the following caveats: 10375 * * we honour the short vs long DFSR format differences. 10376 * * the WnR bit is never set (the caller must do this). 10377 * * for PSMAv5 based systems we don't bother to return a full FSR format 10378 * value. 10379 * 10380 * @env: CPUARMState 10381 * @address: virtual address to get physical address for 10382 * @access_type: 0 for read, 1 for write, 2 for execute 10383 * @mmu_idx: MMU index indicating required translation regime 10384 * @phys_ptr: set to the physical address corresponding to the virtual address 10385 * @attrs: set to the memory transaction attributes to use 10386 * @prot: set to the permissions for the page containing phys_ptr 10387 * @page_size: set to the size of the page containing phys_ptr 10388 * @fi: set to fault info if the translation fails 10389 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes 10390 */ 10391 static bool get_phys_addr(CPUARMState *env, target_ulong address, 10392 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10393 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 10394 target_ulong *page_size, 10395 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) 10396 { 10397 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) { 10398 /* Call ourselves recursively to do the stage 1 and then stage 2 10399 * translations. 10400 */ 10401 if (arm_feature(env, ARM_FEATURE_EL2)) { 10402 hwaddr ipa; 10403 int s2_prot; 10404 int ret; 10405 ARMCacheAttrs cacheattrs2 = {}; 10406 10407 ret = get_phys_addr(env, address, access_type, 10408 stage_1_mmu_idx(mmu_idx), &ipa, attrs, 10409 prot, page_size, fi, cacheattrs); 10410 10411 /* If S1 fails or S2 is disabled, return early. */ 10412 if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) { 10413 *phys_ptr = ipa; 10414 return ret; 10415 } 10416 10417 /* S1 is done. Now do S2 translation. */ 10418 ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS, 10419 phys_ptr, attrs, &s2_prot, 10420 page_size, fi, 10421 cacheattrs != NULL ? &cacheattrs2 : NULL); 10422 fi->s2addr = ipa; 10423 /* Combine the S1 and S2 perms. 
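 * Stage 2 can only remove permissions that stage 1 granted, never add
 * new ones, so the combined result is the intersection of the two.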
*/ 10424 *prot &= s2_prot; 10425 10426 /* Combine the S1 and S2 cache attributes, if needed */ 10427 if (!ret && cacheattrs != NULL) { 10428 *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2); 10429 } 10430 10431 return ret; 10432 } else { 10433 /* 10434 * For non-EL2 CPUs a stage1+stage2 translation is just stage 1. 10435 */ 10436 mmu_idx = stage_1_mmu_idx(mmu_idx); 10437 } 10438 } 10439 10440 /* The page table entries may downgrade secure to non-secure, but 10441 * cannot upgrade an non-secure translation regime's attributes 10442 * to secure. 10443 */ 10444 attrs->secure = regime_is_secure(env, mmu_idx); 10445 attrs->user = regime_is_user(env, mmu_idx); 10446 10447 /* Fast Context Switch Extension. This doesn't exist at all in v8. 10448 * In v7 and earlier it affects all stage 1 translations. 10449 */ 10450 if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS 10451 && !arm_feature(env, ARM_FEATURE_V8)) { 10452 if (regime_el(env, mmu_idx) == 3) { 10453 address += env->cp15.fcseidr_s; 10454 } else { 10455 address += env->cp15.fcseidr_ns; 10456 } 10457 } 10458 10459 if (arm_feature(env, ARM_FEATURE_PMSA)) { 10460 bool ret; 10461 *page_size = TARGET_PAGE_SIZE; 10462 10463 if (arm_feature(env, ARM_FEATURE_V8)) { 10464 /* PMSAv8 */ 10465 ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx, 10466 phys_ptr, attrs, prot, page_size, fi); 10467 } else if (arm_feature(env, ARM_FEATURE_V7)) { 10468 /* PMSAv7 */ 10469 ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx, 10470 phys_ptr, prot, page_size, fi); 10471 } else { 10472 /* Pre-v7 MPU */ 10473 ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx, 10474 phys_ptr, prot, fi); 10475 } 10476 qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32 10477 " mmu_idx %u -> %s (prot %c%c%c)\n", 10478 access_type == MMU_DATA_LOAD ? "reading" : 10479 (access_type == MMU_DATA_STORE ? "writing" : "execute"), 10480 (uint32_t)address, mmu_idx, 10481 ret ? "Miss" : "Hit", 10482 *prot & PAGE_READ ? 'r' : '-', 10483 *prot & PAGE_WRITE ? 'w' : '-', 10484 *prot & PAGE_EXEC ? 'x' : '-'); 10485 10486 return ret; 10487 } 10488 10489 /* Definitely a real MMU, not an MPU */ 10490 10491 if (regime_translation_disabled(env, mmu_idx)) { 10492 /* MMU disabled. */ 10493 *phys_ptr = address; 10494 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 10495 *page_size = TARGET_PAGE_SIZE; 10496 return 0; 10497 } 10498 10499 if (regime_using_lpae_format(env, mmu_idx)) { 10500 return get_phys_addr_lpae(env, address, access_type, mmu_idx, 10501 phys_ptr, attrs, prot, page_size, 10502 fi, cacheattrs); 10503 } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) { 10504 return get_phys_addr_v6(env, address, access_type, mmu_idx, 10505 phys_ptr, attrs, prot, page_size, fi); 10506 } else { 10507 return get_phys_addr_v5(env, address, access_type, mmu_idx, 10508 phys_ptr, prot, page_size, fi); 10509 } 10510 } 10511 10512 /* Walk the page table and (if the mapping exists) add the page 10513 * to the TLB. Return false on success, or true on failure. Populate 10514 * fsr with ARM DFSR/IFSR fault register format value on failure. 
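 * (The fault details are reported back through the ARMMMUFaultInfo
 * argument rather than via a raw fsr value.)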
10515 */ 10516 bool arm_tlb_fill(CPUState *cs, vaddr address, 10517 MMUAccessType access_type, int mmu_idx, 10518 ARMMMUFaultInfo *fi) 10519 { 10520 ARMCPU *cpu = ARM_CPU(cs); 10521 CPUARMState *env = &cpu->env; 10522 hwaddr phys_addr; 10523 target_ulong page_size; 10524 int prot; 10525 int ret; 10526 MemTxAttrs attrs = {}; 10527 10528 ret = get_phys_addr(env, address, access_type, 10529 core_to_arm_mmu_idx(env, mmu_idx), &phys_addr, 10530 &attrs, &prot, &page_size, fi, NULL); 10531 if (!ret) { 10532 /* 10533 * Map a single [sub]page. Regions smaller than our declared 10534 * target page size are handled specially, so for those we 10535 * pass in the exact addresses. 10536 */ 10537 if (page_size >= TARGET_PAGE_SIZE) { 10538 phys_addr &= TARGET_PAGE_MASK; 10539 address &= TARGET_PAGE_MASK; 10540 } 10541 tlb_set_page_with_attrs(cs, address, phys_addr, attrs, 10542 prot, mmu_idx, page_size); 10543 return 0; 10544 } 10545 10546 return ret; 10547 } 10548 10549 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, 10550 MemTxAttrs *attrs) 10551 { 10552 ARMCPU *cpu = ARM_CPU(cs); 10553 CPUARMState *env = &cpu->env; 10554 hwaddr phys_addr; 10555 target_ulong page_size; 10556 int prot; 10557 bool ret; 10558 ARMMMUFaultInfo fi = {}; 10559 ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false)); 10560 10561 *attrs = (MemTxAttrs) {}; 10562 10563 ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr, 10564 attrs, &prot, &page_size, &fi, NULL); 10565 10566 if (ret) { 10567 return -1; 10568 } 10569 return phys_addr; 10570 } 10571 10572 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg) 10573 { 10574 uint32_t mask; 10575 unsigned el = arm_current_el(env); 10576 10577 /* First handle registers which unprivileged can read */ 10578 10579 switch (reg) { 10580 case 0 ... 7: /* xPSR sub-fields */ 10581 mask = 0; 10582 if ((reg & 1) && el) { 10583 mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */ 10584 } 10585 if (!(reg & 4)) { 10586 mask |= XPSR_NZCV | XPSR_Q; /* APSR */ 10587 } 10588 /* EPSR reads as zero */ 10589 return xpsr_read(env) & mask; 10590 break; 10591 case 20: /* CONTROL */ 10592 return env->v7m.control[env->v7m.secure]; 10593 case 0x94: /* CONTROL_NS */ 10594 /* We have to handle this here because unprivileged Secure code 10595 * can read the NS CONTROL register. 
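 * (It must be handled before the unprivileged check below, which
 * squashes reads of the other banked _NS registers to zero.)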
10596 */ 10597 if (!env->v7m.secure) { 10598 return 0; 10599 } 10600 return env->v7m.control[M_REG_NS]; 10601 } 10602 10603 if (el == 0) { 10604 return 0; /* unprivileged reads others as zero */ 10605 } 10606 10607 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 10608 switch (reg) { 10609 case 0x88: /* MSP_NS */ 10610 if (!env->v7m.secure) { 10611 return 0; 10612 } 10613 return env->v7m.other_ss_msp; 10614 case 0x89: /* PSP_NS */ 10615 if (!env->v7m.secure) { 10616 return 0; 10617 } 10618 return env->v7m.other_ss_psp; 10619 case 0x8a: /* MSPLIM_NS */ 10620 if (!env->v7m.secure) { 10621 return 0; 10622 } 10623 return env->v7m.msplim[M_REG_NS]; 10624 case 0x8b: /* PSPLIM_NS */ 10625 if (!env->v7m.secure) { 10626 return 0; 10627 } 10628 return env->v7m.psplim[M_REG_NS]; 10629 case 0x90: /* PRIMASK_NS */ 10630 if (!env->v7m.secure) { 10631 return 0; 10632 } 10633 return env->v7m.primask[M_REG_NS]; 10634 case 0x91: /* BASEPRI_NS */ 10635 if (!env->v7m.secure) { 10636 return 0; 10637 } 10638 return env->v7m.basepri[M_REG_NS]; 10639 case 0x93: /* FAULTMASK_NS */ 10640 if (!env->v7m.secure) { 10641 return 0; 10642 } 10643 return env->v7m.faultmask[M_REG_NS]; 10644 case 0x98: /* SP_NS */ 10645 { 10646 /* This gives the non-secure SP selected based on whether we're 10647 * currently in handler mode or not, using the NS CONTROL.SPSEL. 10648 */ 10649 bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK; 10650 10651 if (!env->v7m.secure) { 10652 return 0; 10653 } 10654 if (!arm_v7m_is_handler_mode(env) && spsel) { 10655 return env->v7m.other_ss_psp; 10656 } else { 10657 return env->v7m.other_ss_msp; 10658 } 10659 } 10660 default: 10661 break; 10662 } 10663 } 10664 10665 switch (reg) { 10666 case 8: /* MSP */ 10667 return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13]; 10668 case 9: /* PSP */ 10669 return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp; 10670 case 10: /* MSPLIM */ 10671 if (!arm_feature(env, ARM_FEATURE_V8)) { 10672 goto bad_reg; 10673 } 10674 return env->v7m.msplim[env->v7m.secure]; 10675 case 11: /* PSPLIM */ 10676 if (!arm_feature(env, ARM_FEATURE_V8)) { 10677 goto bad_reg; 10678 } 10679 return env->v7m.psplim[env->v7m.secure]; 10680 case 16: /* PRIMASK */ 10681 return env->v7m.primask[env->v7m.secure]; 10682 case 17: /* BASEPRI */ 10683 case 18: /* BASEPRI_MAX */ 10684 return env->v7m.basepri[env->v7m.secure]; 10685 case 19: /* FAULTMASK */ 10686 return env->v7m.faultmask[env->v7m.secure]; 10687 default: 10688 bad_reg: 10689 qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special" 10690 " register %d\n", reg); 10691 return 0; 10692 } 10693 } 10694 10695 void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val) 10696 { 10697 /* We're passed bits [11..0] of the instruction; extract 10698 * SYSm and the mask bits. 10699 * Invalid combinations of SYSm and mask are UNPREDICTABLE; 10700 * we choose to treat them as if the mask bits were valid. 10701 * NB that the pseudocode 'mask' variable is bits [11..10], 10702 * whereas ours is [11..8]. 
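 * Bit 3 of our mask therefore corresponds to pseudocode mask[1] (write
 * the NZCVQ flags) and bit 2 to mask[0] (write the GE bits).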
10703 */ 10704 uint32_t mask = extract32(maskreg, 8, 4); 10705 uint32_t reg = extract32(maskreg, 0, 8); 10706 10707 if (arm_current_el(env) == 0 && reg > 7) { 10708 /* only xPSR sub-fields may be written by unprivileged */ 10709 return; 10710 } 10711 10712 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 10713 switch (reg) { 10714 case 0x88: /* MSP_NS */ 10715 if (!env->v7m.secure) { 10716 return; 10717 } 10718 env->v7m.other_ss_msp = val; 10719 return; 10720 case 0x89: /* PSP_NS */ 10721 if (!env->v7m.secure) { 10722 return; 10723 } 10724 env->v7m.other_ss_psp = val; 10725 return; 10726 case 0x8a: /* MSPLIM_NS */ 10727 if (!env->v7m.secure) { 10728 return; 10729 } 10730 env->v7m.msplim[M_REG_NS] = val & ~7; 10731 return; 10732 case 0x8b: /* PSPLIM_NS */ 10733 if (!env->v7m.secure) { 10734 return; 10735 } 10736 env->v7m.psplim[M_REG_NS] = val & ~7; 10737 return; 10738 case 0x90: /* PRIMASK_NS */ 10739 if (!env->v7m.secure) { 10740 return; 10741 } 10742 env->v7m.primask[M_REG_NS] = val & 1; 10743 return; 10744 case 0x91: /* BASEPRI_NS */ 10745 if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) { 10746 return; 10747 } 10748 env->v7m.basepri[M_REG_NS] = val & 0xff; 10749 return; 10750 case 0x93: /* FAULTMASK_NS */ 10751 if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) { 10752 return; 10753 } 10754 env->v7m.faultmask[M_REG_NS] = val & 1; 10755 return; 10756 case 0x94: /* CONTROL_NS */ 10757 if (!env->v7m.secure) { 10758 return; 10759 } 10760 write_v7m_control_spsel_for_secstate(env, 10761 val & R_V7M_CONTROL_SPSEL_MASK, 10762 M_REG_NS); 10763 if (arm_feature(env, ARM_FEATURE_M_MAIN)) { 10764 env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK; 10765 env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK; 10766 } 10767 return; 10768 case 0x98: /* SP_NS */ 10769 { 10770 /* This gives the non-secure SP selected based on whether we're 10771 * currently in handler mode or not, using the NS CONTROL.SPSEL. 10772 */ 10773 bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK; 10774 10775 if (!env->v7m.secure) { 10776 return; 10777 } 10778 if (!arm_v7m_is_handler_mode(env) && spsel) { 10779 env->v7m.other_ss_psp = val; 10780 } else { 10781 env->v7m.other_ss_msp = val; 10782 } 10783 return; 10784 } 10785 default: 10786 break; 10787 } 10788 } 10789 10790 switch (reg) { 10791 case 0 ... 
7: /* xPSR sub-fields */ 10792 /* only APSR is actually writable */ 10793 if (!(reg & 4)) { 10794 uint32_t apsrmask = 0; 10795 10796 if (mask & 8) { 10797 apsrmask |= XPSR_NZCV | XPSR_Q; 10798 } 10799 if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) { 10800 apsrmask |= XPSR_GE; 10801 } 10802 xpsr_write(env, val, apsrmask); 10803 } 10804 break; 10805 case 8: /* MSP */ 10806 if (v7m_using_psp(env)) { 10807 env->v7m.other_sp = val; 10808 } else { 10809 env->regs[13] = val; 10810 } 10811 break; 10812 case 9: /* PSP */ 10813 if (v7m_using_psp(env)) { 10814 env->regs[13] = val; 10815 } else { 10816 env->v7m.other_sp = val; 10817 } 10818 break; 10819 case 10: /* MSPLIM */ 10820 if (!arm_feature(env, ARM_FEATURE_V8)) { 10821 goto bad_reg; 10822 } 10823 env->v7m.msplim[env->v7m.secure] = val & ~7; 10824 break; 10825 case 11: /* PSPLIM */ 10826 if (!arm_feature(env, ARM_FEATURE_V8)) { 10827 goto bad_reg; 10828 } 10829 env->v7m.psplim[env->v7m.secure] = val & ~7; 10830 break; 10831 case 16: /* PRIMASK */ 10832 env->v7m.primask[env->v7m.secure] = val & 1; 10833 break; 10834 case 17: /* BASEPRI */ 10835 if (!arm_feature(env, ARM_FEATURE_M_MAIN)) { 10836 goto bad_reg; 10837 } 10838 env->v7m.basepri[env->v7m.secure] = val & 0xff; 10839 break; 10840 case 18: /* BASEPRI_MAX */ 10841 if (!arm_feature(env, ARM_FEATURE_M_MAIN)) { 10842 goto bad_reg; 10843 } 10844 val &= 0xff; 10845 if (val != 0 && (val < env->v7m.basepri[env->v7m.secure] 10846 || env->v7m.basepri[env->v7m.secure] == 0)) { 10847 env->v7m.basepri[env->v7m.secure] = val; 10848 } 10849 break; 10850 case 19: /* FAULTMASK */ 10851 if (!arm_feature(env, ARM_FEATURE_M_MAIN)) { 10852 goto bad_reg; 10853 } 10854 env->v7m.faultmask[env->v7m.secure] = val & 1; 10855 break; 10856 case 20: /* CONTROL */ 10857 /* Writing to the SPSEL bit only has an effect if we are in 10858 * thread mode; other bits can be updated by any privileged code. 10859 * write_v7m_control_spsel() deals with updating the SPSEL bit in 10860 * env->v7m.control, so we only need update the others. 10861 * For v7M, we must just ignore explicit writes to SPSEL in handler 10862 * mode; for v8M the write is permitted but will have no effect. 10863 */ 10864 if (arm_feature(env, ARM_FEATURE_V8) || 10865 !arm_v7m_is_handler_mode(env)) { 10866 write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0); 10867 } 10868 if (arm_feature(env, ARM_FEATURE_M_MAIN)) { 10869 env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK; 10870 env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK; 10871 } 10872 break; 10873 default: 10874 bad_reg: 10875 qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special" 10876 " register %d\n", reg); 10877 return; 10878 } 10879 } 10880 10881 uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) 10882 { 10883 /* Implement the TT instruction. op is bits [7:6] of the insn. */ 10884 bool forceunpriv = op & 1; 10885 bool alt = op & 2; 10886 V8M_SAttributes sattrs = {}; 10887 uint32_t tt_resp; 10888 bool r, rw, nsr, nsrw, mrvalid; 10889 int prot; 10890 ARMMMUFaultInfo fi = {}; 10891 MemTxAttrs attrs = {}; 10892 hwaddr phys_addr; 10893 ARMMMUIdx mmu_idx; 10894 uint32_t mregion; 10895 bool targetpriv; 10896 bool targetsec = env->v7m.secure; 10897 bool is_subpage; 10898 10899 /* Work out what the security state and privilege level we're 10900 * interested in is... 
10901 */ 10902 if (alt) { 10903 targetsec = !targetsec; 10904 } 10905 10906 if (forceunpriv) { 10907 targetpriv = false; 10908 } else { 10909 targetpriv = arm_v7m_is_handler_mode(env) || 10910 !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK); 10911 } 10912 10913 /* ...and then figure out which MMU index this is */ 10914 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv); 10915 10916 /* We know that the MPU and SAU don't care about the access type 10917 * for our purposes beyond that we don't want to claim to be 10918 * an insn fetch, so we arbitrarily call this a read. 10919 */ 10920 10921 /* MPU region info only available for privileged or if 10922 * inspecting the other MPU state. 10923 */ 10924 if (arm_current_el(env) != 0 || alt) { 10925 /* We can ignore the return value as prot is always set */ 10926 pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, 10927 &phys_addr, &attrs, &prot, &is_subpage, 10928 &fi, &mregion); 10929 if (mregion == -1) { 10930 mrvalid = false; 10931 mregion = 0; 10932 } else { 10933 mrvalid = true; 10934 } 10935 r = prot & PAGE_READ; 10936 rw = prot & PAGE_WRITE; 10937 } else { 10938 r = false; 10939 rw = false; 10940 mrvalid = false; 10941 mregion = 0; 10942 } 10943 10944 if (env->v7m.secure) { 10945 v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs); 10946 nsr = sattrs.ns && r; 10947 nsrw = sattrs.ns && rw; 10948 } else { 10949 sattrs.ns = true; 10950 nsr = false; 10951 nsrw = false; 10952 } 10953 10954 tt_resp = (sattrs.iregion << 24) | 10955 (sattrs.irvalid << 23) | 10956 ((!sattrs.ns) << 22) | 10957 (nsrw << 21) | 10958 (nsr << 20) | 10959 (rw << 19) | 10960 (r << 18) | 10961 (sattrs.srvalid << 17) | 10962 (mrvalid << 16) | 10963 (sattrs.sregion << 8) | 10964 mregion; 10965 10966 return tt_resp; 10967 } 10968 10969 #endif 10970 10971 void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in) 10972 { 10973 /* Implement DC ZVA, which zeroes a fixed-length block of memory. 10974 * Note that we do not implement the (architecturally mandated) 10975 * alignment fault for attempts to use this on Device memory 10976 * (which matches the usual QEMU behaviour of not implementing either 10977 * alignment faults or any memory attribute handling). 10978 */ 10979 10980 ARMCPU *cpu = arm_env_get_cpu(env); 10981 uint64_t blocklen = 4 << cpu->dcz_blocksize; 10982 uint64_t vaddr = vaddr_in & ~(blocklen - 1); 10983 10984 #ifndef CONFIG_USER_ONLY 10985 { 10986 /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than 10987 * the block size so we might have to do more than one TLB lookup. 10988 * We know that in fact for any v8 CPU the page size is at least 4K 10989 * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only 10990 * 1K as an artefact of legacy v5 subpage support being present in the 10991 * same QEMU executable. 10992 */ 10993 int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE); 10994 void *hostaddr[maxidx]; 10995 int try, i; 10996 unsigned mmu_idx = cpu_mmu_index(env, false); 10997 TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); 10998 10999 for (try = 0; try < 2; try++) { 11000 11001 for (i = 0; i < maxidx; i++) { 11002 hostaddr[i] = tlb_vaddr_to_host(env, 11003 vaddr + TARGET_PAGE_SIZE * i, 11004 1, mmu_idx); 11005 if (!hostaddr[i]) { 11006 break; 11007 } 11008 } 11009 if (i == maxidx) { 11010 /* If it's all in the TLB it's fair game for just writing to; 11011 * we know we don't need to update dirty status, etc. 
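 * (tlb_vaddr_to_host() only returns a host pointer when the page can
 * be written directly, so plain memsets are safe here.)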
11012 */ 11013 for (i = 0; i < maxidx - 1; i++) { 11014 memset(hostaddr[i], 0, TARGET_PAGE_SIZE); 11015 } 11016 memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE)); 11017 return; 11018 } 11019 /* OK, try a store and see if we can populate the tlb. This 11020 * might cause an exception if the memory isn't writable, 11021 * in which case we will longjmp out of here. We must for 11022 * this purpose use the actual register value passed to us 11023 * so that we get the fault address right. 11024 */ 11025 helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC()); 11026 /* Now we can populate the other TLB entries, if any */ 11027 for (i = 0; i < maxidx; i++) { 11028 uint64_t va = vaddr + TARGET_PAGE_SIZE * i; 11029 if (va != (vaddr_in & TARGET_PAGE_MASK)) { 11030 helper_ret_stb_mmu(env, va, 0, oi, GETPC()); 11031 } 11032 } 11033 } 11034 11035 /* Slow path (probably attempt to do this to an I/O device or 11036 * similar, or clearing of a block of code we have translations 11037 * cached for). Just do a series of byte writes as the architecture 11038 * demands. It's not worth trying to use a cpu_physical_memory_map(), 11039 * memset(), unmap() sequence here because: 11040 * + we'd need to account for the blocksize being larger than a page 11041 * + the direct-RAM access case is almost always going to be dealt 11042 * with in the fastpath code above, so there's no speed benefit 11043 * + we would have to deal with the map returning NULL because the 11044 * bounce buffer was in use 11045 */ 11046 for (i = 0; i < blocklen; i++) { 11047 helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC()); 11048 } 11049 } 11050 #else 11051 memset(g2h(vaddr), 0, blocklen); 11052 #endif 11053 } 11054 11055 /* Note that signed overflow is undefined in C. The following routines are 11056 careful to use unsigned types where modulo arithmetic is required. 11057 Failure to do so _will_ break on newer gcc. */ 11058 11059 /* Signed saturating arithmetic. */ 11060 11061 /* Perform 16-bit signed saturating addition. */ 11062 static inline uint16_t add16_sat(uint16_t a, uint16_t b) 11063 { 11064 uint16_t res; 11065 11066 res = a + b; 11067 if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) { 11068 if (a & 0x8000) 11069 res = 0x8000; 11070 else 11071 res = 0x7fff; 11072 } 11073 return res; 11074 } 11075 11076 /* Perform 8-bit signed saturating addition. */ 11077 static inline uint8_t add8_sat(uint8_t a, uint8_t b) 11078 { 11079 uint8_t res; 11080 11081 res = a + b; 11082 if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) { 11083 if (a & 0x80) 11084 res = 0x80; 11085 else 11086 res = 0x7f; 11087 } 11088 return res; 11089 } 11090 11091 /* Perform 16-bit signed saturating subtraction. */ 11092 static inline uint16_t sub16_sat(uint16_t a, uint16_t b) 11093 { 11094 uint16_t res; 11095 11096 res = a - b; 11097 if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) { 11098 if (a & 0x8000) 11099 res = 0x8000; 11100 else 11101 res = 0x7fff; 11102 } 11103 return res; 11104 } 11105 11106 /* Perform 8-bit signed saturating subtraction. 
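 * For example 0x80 - 0x01 (i.e. -128 - 1) saturates to 0x80 (-128).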
*/ 11107 static inline uint8_t sub8_sat(uint8_t a, uint8_t b) 11108 { 11109 uint8_t res; 11110 11111 res = a - b; 11112 if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) { 11113 if (a & 0x80) 11114 res = 0x80; 11115 else 11116 res = 0x7f; 11117 } 11118 return res; 11119 } 11120 11121 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16); 11122 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16); 11123 #define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8); 11124 #define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8); 11125 #define PFX q 11126 11127 #include "op_addsub.h" 11128 11129 /* Unsigned saturating arithmetic. */ 11130 static inline uint16_t add16_usat(uint16_t a, uint16_t b) 11131 { 11132 uint16_t res; 11133 res = a + b; 11134 if (res < a) 11135 res = 0xffff; 11136 return res; 11137 } 11138 11139 static inline uint16_t sub16_usat(uint16_t a, uint16_t b) 11140 { 11141 if (a > b) 11142 return a - b; 11143 else 11144 return 0; 11145 } 11146 11147 static inline uint8_t add8_usat(uint8_t a, uint8_t b) 11148 { 11149 uint8_t res; 11150 res = a + b; 11151 if (res < a) 11152 res = 0xff; 11153 return res; 11154 } 11155 11156 static inline uint8_t sub8_usat(uint8_t a, uint8_t b) 11157 { 11158 if (a > b) 11159 return a - b; 11160 else 11161 return 0; 11162 } 11163 11164 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16); 11165 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16); 11166 #define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8); 11167 #define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8); 11168 #define PFX uq 11169 11170 #include "op_addsub.h" 11171 11172 /* Signed modulo arithmetic. */ 11173 #define SARITH16(a, b, n, op) do { \ 11174 int32_t sum; \ 11175 sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \ 11176 RESULT(sum, n, 16); \ 11177 if (sum >= 0) \ 11178 ge |= 3 << (n * 2); \ 11179 } while(0) 11180 11181 #define SARITH8(a, b, n, op) do { \ 11182 int32_t sum; \ 11183 sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \ 11184 RESULT(sum, n, 8); \ 11185 if (sum >= 0) \ 11186 ge |= 1 << n; \ 11187 } while(0) 11188 11189 11190 #define ADD16(a, b, n) SARITH16(a, b, n, +) 11191 #define SUB16(a, b, n) SARITH16(a, b, n, -) 11192 #define ADD8(a, b, n) SARITH8(a, b, n, +) 11193 #define SUB8(a, b, n) SARITH8(a, b, n, -) 11194 #define PFX s 11195 #define ARITH_GE 11196 11197 #include "op_addsub.h" 11198 11199 /* Unsigned modulo arithmetic. */ 11200 #define ADD16(a, b, n) do { \ 11201 uint32_t sum; \ 11202 sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \ 11203 RESULT(sum, n, 16); \ 11204 if ((sum >> 16) == 1) \ 11205 ge |= 3 << (n * 2); \ 11206 } while(0) 11207 11208 #define ADD8(a, b, n) do { \ 11209 uint32_t sum; \ 11210 sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \ 11211 RESULT(sum, n, 8); \ 11212 if ((sum >> 8) == 1) \ 11213 ge |= 1 << n; \ 11214 } while(0) 11215 11216 #define SUB16(a, b, n) do { \ 11217 uint32_t sum; \ 11218 sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \ 11219 RESULT(sum, n, 16); \ 11220 if ((sum >> 16) == 0) \ 11221 ge |= 3 << (n * 2); \ 11222 } while(0) 11223 11224 #define SUB8(a, b, n) do { \ 11225 uint32_t sum; \ 11226 sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \ 11227 RESULT(sum, n, 8); \ 11228 if ((sum >> 8) == 0) \ 11229 ge |= 1 << n; \ 11230 } while(0) 11231 11232 #define PFX u 11233 #define ARITH_GE 11234 11235 #include "op_addsub.h" 11236 11237 /* Halved signed arithmetic. 
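 * The operation is done at 32 bits and then halved, so it cannot
 * overflow: e.g. 0x7fff + 0x7fff gives 0x7fff rather than wrapping.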
*/ 11238 #define ADD16(a, b, n) \ 11239 RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16) 11240 #define SUB16(a, b, n) \ 11241 RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16) 11242 #define ADD8(a, b, n) \ 11243 RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8) 11244 #define SUB8(a, b, n) \ 11245 RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8) 11246 #define PFX sh 11247 11248 #include "op_addsub.h" 11249 11250 /* Halved unsigned arithmetic. */ 11251 #define ADD16(a, b, n) \ 11252 RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16) 11253 #define SUB16(a, b, n) \ 11254 RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16) 11255 #define ADD8(a, b, n) \ 11256 RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8) 11257 #define SUB8(a, b, n) \ 11258 RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8) 11259 #define PFX uh 11260 11261 #include "op_addsub.h" 11262 11263 static inline uint8_t do_usad(uint8_t a, uint8_t b) 11264 { 11265 if (a > b) 11266 return a - b; 11267 else 11268 return b - a; 11269 } 11270 11271 /* Unsigned sum of absolute byte differences. */ 11272 uint32_t HELPER(usad8)(uint32_t a, uint32_t b) 11273 { 11274 uint32_t sum; 11275 sum = do_usad(a, b); 11276 sum += do_usad(a >> 8, b >> 8); 11277 sum += do_usad(a >> 16, b >>16); 11278 sum += do_usad(a >> 24, b >> 24); 11279 return sum; 11280 } 11281 11282 /* For ARMv6 SEL instruction. */ 11283 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b) 11284 { 11285 uint32_t mask; 11286 11287 mask = 0; 11288 if (flags & 1) 11289 mask |= 0xff; 11290 if (flags & 2) 11291 mask |= 0xff00; 11292 if (flags & 4) 11293 mask |= 0xff0000; 11294 if (flags & 8) 11295 mask |= 0xff000000; 11296 return (a & mask) | (b & ~mask); 11297 } 11298 11299 /* VFP support. We follow the convention used for VFP instructions: 11300 Single precision routines have a "s" suffix, double precision a 11301 "d" suffix. */ 11302 11303 /* Convert host exception flags to vfp form. */ 11304 static inline int vfp_exceptbits_from_host(int host_bits) 11305 { 11306 int target_bits = 0; 11307 11308 if (host_bits & float_flag_invalid) 11309 target_bits |= 1; 11310 if (host_bits & float_flag_divbyzero) 11311 target_bits |= 2; 11312 if (host_bits & float_flag_overflow) 11313 target_bits |= 4; 11314 if (host_bits & (float_flag_underflow | float_flag_output_denormal)) 11315 target_bits |= 8; 11316 if (host_bits & float_flag_inexact) 11317 target_bits |= 0x10; 11318 if (host_bits & float_flag_input_denormal) 11319 target_bits |= 0x80; 11320 return target_bits; 11321 } 11322 11323 uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env) 11324 { 11325 int i; 11326 uint32_t fpscr; 11327 11328 fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff) 11329 | (env->vfp.vec_len << 16) 11330 | (env->vfp.vec_stride << 20); 11331 11332 i = get_float_exception_flags(&env->vfp.fp_status); 11333 i |= get_float_exception_flags(&env->vfp.standard_fp_status); 11334 /* FZ16 does not generate an input denormal exception. */ 11335 i |= (get_float_exception_flags(&env->vfp.fp_status_f16) 11336 & ~float_flag_input_denormal); 11337 11338 fpscr |= vfp_exceptbits_from_host(i); 11339 return fpscr; 11340 } 11341 11342 uint32_t vfp_get_fpscr(CPUARMState *env) 11343 { 11344 return HELPER(vfp_get_fpscr)(env); 11345 } 11346 11347 /* Convert vfp exception flags to target form. 
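 * (This is the inverse of vfp_exceptbits_from_host(): it maps the FPSCR
 * cumulative exception bits back onto softfloat's float_flag_* values.)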
*/ 11348 static inline int vfp_exceptbits_to_host(int target_bits) 11349 { 11350 int host_bits = 0; 11351 11352 if (target_bits & 1) 11353 host_bits |= float_flag_invalid; 11354 if (target_bits & 2) 11355 host_bits |= float_flag_divbyzero; 11356 if (target_bits & 4) 11357 host_bits |= float_flag_overflow; 11358 if (target_bits & 8) 11359 host_bits |= float_flag_underflow; 11360 if (target_bits & 0x10) 11361 host_bits |= float_flag_inexact; 11362 if (target_bits & 0x80) 11363 host_bits |= float_flag_input_denormal; 11364 return host_bits; 11365 } 11366 11367 void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val) 11368 { 11369 int i; 11370 uint32_t changed; 11371 11372 /* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */ 11373 if (!arm_feature(env, ARM_FEATURE_V8_FP16)) { 11374 val &= ~FPCR_FZ16; 11375 } 11376 11377 changed = env->vfp.xregs[ARM_VFP_FPSCR]; 11378 env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff); 11379 env->vfp.vec_len = (val >> 16) & 7; 11380 env->vfp.vec_stride = (val >> 20) & 3; 11381 11382 changed ^= val; 11383 if (changed & (3 << 22)) { 11384 i = (val >> 22) & 3; 11385 switch (i) { 11386 case FPROUNDING_TIEEVEN: 11387 i = float_round_nearest_even; 11388 break; 11389 case FPROUNDING_POSINF: 11390 i = float_round_up; 11391 break; 11392 case FPROUNDING_NEGINF: 11393 i = float_round_down; 11394 break; 11395 case FPROUNDING_ZERO: 11396 i = float_round_to_zero; 11397 break; 11398 } 11399 set_float_rounding_mode(i, &env->vfp.fp_status); 11400 set_float_rounding_mode(i, &env->vfp.fp_status_f16); 11401 } 11402 if (changed & FPCR_FZ16) { 11403 bool ftz_enabled = val & FPCR_FZ16; 11404 set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16); 11405 set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16); 11406 } 11407 if (changed & FPCR_FZ) { 11408 bool ftz_enabled = val & FPCR_FZ; 11409 set_flush_to_zero(ftz_enabled, &env->vfp.fp_status); 11410 set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status); 11411 } 11412 if (changed & FPCR_DN) { 11413 bool dnan_enabled = val & FPCR_DN; 11414 set_default_nan_mode(dnan_enabled, &env->vfp.fp_status); 11415 set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16); 11416 } 11417 11418 /* The exception flags are ORed together when we read fpscr so we 11419 * only need to preserve the current state in one of our 11420 * float_status values. 
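 * We keep the accumulated flags in fp_status and clear fp_status_f16
 * and standard_fp_status.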
11421 */ 11422 i = vfp_exceptbits_to_host(val); 11423 set_float_exception_flags(i, &env->vfp.fp_status); 11424 set_float_exception_flags(0, &env->vfp.fp_status_f16); 11425 set_float_exception_flags(0, &env->vfp.standard_fp_status); 11426 } 11427 11428 void vfp_set_fpscr(CPUARMState *env, uint32_t val) 11429 { 11430 HELPER(vfp_set_fpscr)(env, val); 11431 } 11432 11433 #define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p)) 11434 11435 #define VFP_BINOP(name) \ 11436 float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \ 11437 { \ 11438 float_status *fpst = fpstp; \ 11439 return float32_ ## name(a, b, fpst); \ 11440 } \ 11441 float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \ 11442 { \ 11443 float_status *fpst = fpstp; \ 11444 return float64_ ## name(a, b, fpst); \ 11445 } 11446 VFP_BINOP(add) 11447 VFP_BINOP(sub) 11448 VFP_BINOP(mul) 11449 VFP_BINOP(div) 11450 VFP_BINOP(min) 11451 VFP_BINOP(max) 11452 VFP_BINOP(minnum) 11453 VFP_BINOP(maxnum) 11454 #undef VFP_BINOP 11455 11456 float32 VFP_HELPER(neg, s)(float32 a) 11457 { 11458 return float32_chs(a); 11459 } 11460 11461 float64 VFP_HELPER(neg, d)(float64 a) 11462 { 11463 return float64_chs(a); 11464 } 11465 11466 float32 VFP_HELPER(abs, s)(float32 a) 11467 { 11468 return float32_abs(a); 11469 } 11470 11471 float64 VFP_HELPER(abs, d)(float64 a) 11472 { 11473 return float64_abs(a); 11474 } 11475 11476 float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env) 11477 { 11478 return float32_sqrt(a, &env->vfp.fp_status); 11479 } 11480 11481 float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env) 11482 { 11483 return float64_sqrt(a, &env->vfp.fp_status); 11484 } 11485 11486 /* XXX: check quiet/signaling case */ 11487 #define DO_VFP_cmp(p, type) \ 11488 void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env) \ 11489 { \ 11490 uint32_t flags; \ 11491 switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \ 11492 case 0: flags = 0x6; break; \ 11493 case -1: flags = 0x8; break; \ 11494 case 1: flags = 0x2; break; \ 11495 default: case 2: flags = 0x3; break; \ 11496 } \ 11497 env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \ 11498 | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \ 11499 } \ 11500 void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \ 11501 { \ 11502 uint32_t flags; \ 11503 switch(type ## _compare(a, b, &env->vfp.fp_status)) { \ 11504 case 0: flags = 0x6; break; \ 11505 case -1: flags = 0x8; break; \ 11506 case 1: flags = 0x2; break; \ 11507 default: case 2: flags = 0x3; break; \ 11508 } \ 11509 env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \ 11510 | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \ 11511 } 11512 DO_VFP_cmp(s, float32) 11513 DO_VFP_cmp(d, float64) 11514 #undef DO_VFP_cmp 11515 11516 /* Integer to float and float to integer conversions */ 11517 11518 #define CONV_ITOF(name, ftype, fsz, sign) \ 11519 ftype HELPER(name)(uint32_t x, void *fpstp) \ 11520 { \ 11521 float_status *fpst = fpstp; \ 11522 return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \ 11523 } 11524 11525 #define CONV_FTOI(name, ftype, fsz, sign, round) \ 11526 sign##int32_t HELPER(name)(ftype x, void *fpstp) \ 11527 { \ 11528 float_status *fpst = fpstp; \ 11529 if (float##fsz##_is_any_nan(x)) { \ 11530 float_raise(float_flag_invalid, fpst); \ 11531 return 0; \ 11532 } \ 11533 return float##fsz##_to_##sign##int32##round(x, fpst); \ 11534 } 11535 11536 #define FLOAT_CONVS(name, p, ftype, fsz, sign) \ 11537 CONV_ITOF(vfp_##name##to##p, ftype, fsz, sign) \ 11538 CONV_FTOI(vfp_to##name##p, ftype, fsz, sign, ) \ 11539 
CONV_FTOI(vfp_to##name##z##p, ftype, fsz, sign, _round_to_zero) 11540 11541 FLOAT_CONVS(si, h, uint32_t, 16, ) 11542 FLOAT_CONVS(si, s, float32, 32, ) 11543 FLOAT_CONVS(si, d, float64, 64, ) 11544 FLOAT_CONVS(ui, h, uint32_t, 16, u) 11545 FLOAT_CONVS(ui, s, float32, 32, u) 11546 FLOAT_CONVS(ui, d, float64, 64, u) 11547 11548 #undef CONV_ITOF 11549 #undef CONV_FTOI 11550 #undef FLOAT_CONVS 11551 11552 /* floating point conversion */ 11553 float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env) 11554 { 11555 return float32_to_float64(x, &env->vfp.fp_status); 11556 } 11557 11558 float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env) 11559 { 11560 return float64_to_float32(x, &env->vfp.fp_status); 11561 } 11562 11563 /* VFP3 fixed point conversion. */ 11564 #define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ 11565 float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \ 11566 void *fpstp) \ 11567 { \ 11568 float_status *fpst = fpstp; \ 11569 float##fsz tmp; \ 11570 tmp = itype##_to_##float##fsz(x, fpst); \ 11571 return float##fsz##_scalbn(tmp, -(int)shift, fpst); \ 11572 } 11573 11574 /* Notice that we want only input-denormal exception flags from the 11575 * scalbn operation: the other possible flags (overflow+inexact if 11576 * we overflow to infinity, output-denormal) aren't correct for the 11577 * complete scale-and-convert operation. 11578 */ 11579 #define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \ 11580 uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \ 11581 uint32_t shift, \ 11582 void *fpstp) \ 11583 { \ 11584 float_status *fpst = fpstp; \ 11585 int old_exc_flags = get_float_exception_flags(fpst); \ 11586 float##fsz tmp; \ 11587 if (float##fsz##_is_any_nan(x)) { \ 11588 float_raise(float_flag_invalid, fpst); \ 11589 return 0; \ 11590 } \ 11591 tmp = float##fsz##_scalbn(x, shift, fpst); \ 11592 old_exc_flags |= get_float_exception_flags(fpst) \ 11593 & float_flag_input_denormal; \ 11594 set_float_exception_flags(old_exc_flags, fpst); \ 11595 return float##fsz##_to_##itype##round(tmp, fpst); \ 11596 } 11597 11598 #define VFP_CONV_FIX(name, p, fsz, isz, itype) \ 11599 VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ 11600 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \ 11601 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, ) 11602 11603 #define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \ 11604 VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ 11605 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, ) 11606 11607 VFP_CONV_FIX(sh, d, 64, 64, int16) 11608 VFP_CONV_FIX(sl, d, 64, 64, int32) 11609 VFP_CONV_FIX_A64(sq, d, 64, 64, int64) 11610 VFP_CONV_FIX(uh, d, 64, 64, uint16) 11611 VFP_CONV_FIX(ul, d, 64, 64, uint32) 11612 VFP_CONV_FIX_A64(uq, d, 64, 64, uint64) 11613 VFP_CONV_FIX(sh, s, 32, 32, int16) 11614 VFP_CONV_FIX(sl, s, 32, 32, int32) 11615 VFP_CONV_FIX_A64(sq, s, 32, 64, int64) 11616 VFP_CONV_FIX(uh, s, 32, 32, uint16) 11617 VFP_CONV_FIX(ul, s, 32, 32, uint32) 11618 VFP_CONV_FIX_A64(uq, s, 32, 64, uint64) 11619 11620 #undef VFP_CONV_FIX 11621 #undef VFP_CONV_FIX_FLOAT 11622 #undef VFP_CONV_FLOAT_FIX_ROUND 11623 #undef VFP_CONV_FIX_A64 11624 11625 /* Conversion to/from f16 can overflow to infinity before/after scaling. 11626 * Therefore we convert to f64, scale, and then convert f64 to f16; or 11627 * vice versa for conversion to integer. 11628 * 11629 * For 16- and 32-bit integers, the conversion to f64 never rounds. 
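 * (f64 has a 53-bit significand, so any 32-bit integer is exactly
 * representable.)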
11630 * For 64-bit integers, any integer that would cause rounding will also 11631 * overflow to f16 infinity, so there is no double rounding problem. 11632 */ 11633 11634 static float16 do_postscale_fp16(float64 f, int shift, float_status *fpst) 11635 { 11636 return float64_to_float16(float64_scalbn(f, -shift, fpst), true, fpst); 11637 } 11638 11639 uint32_t HELPER(vfp_sltoh)(uint32_t x, uint32_t shift, void *fpst) 11640 { 11641 return do_postscale_fp16(int32_to_float64(x, fpst), shift, fpst); 11642 } 11643 11644 uint32_t HELPER(vfp_ultoh)(uint32_t x, uint32_t shift, void *fpst) 11645 { 11646 return do_postscale_fp16(uint32_to_float64(x, fpst), shift, fpst); 11647 } 11648 11649 uint32_t HELPER(vfp_sqtoh)(uint64_t x, uint32_t shift, void *fpst) 11650 { 11651 return do_postscale_fp16(int64_to_float64(x, fpst), shift, fpst); 11652 } 11653 11654 uint32_t HELPER(vfp_uqtoh)(uint64_t x, uint32_t shift, void *fpst) 11655 { 11656 return do_postscale_fp16(uint64_to_float64(x, fpst), shift, fpst); 11657 } 11658 11659 static float64 do_prescale_fp16(float16 f, int shift, float_status *fpst) 11660 { 11661 if (unlikely(float16_is_any_nan(f))) { 11662 float_raise(float_flag_invalid, fpst); 11663 return 0; 11664 } else { 11665 int old_exc_flags = get_float_exception_flags(fpst); 11666 float64 ret; 11667 11668 ret = float16_to_float64(f, true, fpst); 11669 ret = float64_scalbn(ret, shift, fpst); 11670 old_exc_flags |= get_float_exception_flags(fpst) 11671 & float_flag_input_denormal; 11672 set_float_exception_flags(old_exc_flags, fpst); 11673 11674 return ret; 11675 } 11676 } 11677 11678 uint32_t HELPER(vfp_toshh)(uint32_t x, uint32_t shift, void *fpst) 11679 { 11680 return float64_to_int16(do_prescale_fp16(x, shift, fpst), fpst); 11681 } 11682 11683 uint32_t HELPER(vfp_touhh)(uint32_t x, uint32_t shift, void *fpst) 11684 { 11685 return float64_to_uint16(do_prescale_fp16(x, shift, fpst), fpst); 11686 } 11687 11688 uint32_t HELPER(vfp_toslh)(uint32_t x, uint32_t shift, void *fpst) 11689 { 11690 return float64_to_int32(do_prescale_fp16(x, shift, fpst), fpst); 11691 } 11692 11693 uint32_t HELPER(vfp_toulh)(uint32_t x, uint32_t shift, void *fpst) 11694 { 11695 return float64_to_uint32(do_prescale_fp16(x, shift, fpst), fpst); 11696 } 11697 11698 uint64_t HELPER(vfp_tosqh)(uint32_t x, uint32_t shift, void *fpst) 11699 { 11700 return float64_to_int64(do_prescale_fp16(x, shift, fpst), fpst); 11701 } 11702 11703 uint64_t HELPER(vfp_touqh)(uint32_t x, uint32_t shift, void *fpst) 11704 { 11705 return float64_to_uint64(do_prescale_fp16(x, shift, fpst), fpst); 11706 } 11707 11708 /* Set the current fp rounding mode and return the old one. 11709 * The argument is a softfloat float_round_ value. 11710 */ 11711 uint32_t HELPER(set_rmode)(uint32_t rmode, void *fpstp) 11712 { 11713 float_status *fp_status = fpstp; 11714 11715 uint32_t prev_rmode = get_float_rounding_mode(fp_status); 11716 set_float_rounding_mode(rmode, fp_status); 11717 11718 return prev_rmode; 11719 } 11720 11721 /* Set the current fp rounding mode in the standard fp status and return 11722 * the old one. This is for NEON instructions that need to change the 11723 * rounding mode but wish to use the standard FPSCR values for everything 11724 * else. Always set the rounding mode back to the correct value after 11725 * modifying it. 11726 * The argument is a softfloat float_round_ value. 
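 *
 * The intended use is a save/modify/restore sequence around the emitted
 * operation, roughly (illustrative sketch only; names are not from the code):
 *   uint32_t saved = HELPER(set_neon_rmode)(new_rmode, env);
 *   ... perform the operation using vfp.standard_fp_status ...
 *   HELPER(set_neon_rmode)(saved, env);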
11727 */ 11728 uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env) 11729 { 11730 float_status *fp_status = &env->vfp.standard_fp_status; 11731 11732 uint32_t prev_rmode = get_float_rounding_mode(fp_status); 11733 set_float_rounding_mode(rmode, fp_status); 11734 11735 return prev_rmode; 11736 } 11737 11738 /* Half precision conversions. */ 11739 float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, void *fpstp, uint32_t ahp_mode) 11740 { 11741 /* Squash FZ16 to 0 for the duration of conversion. In this case, 11742 * it would affect flushing input denormals. 11743 */ 11744 float_status *fpst = fpstp; 11745 flag save = get_flush_inputs_to_zero(fpst); 11746 set_flush_inputs_to_zero(false, fpst); 11747 float32 r = float16_to_float32(a, !ahp_mode, fpst); 11748 set_flush_inputs_to_zero(save, fpst); 11749 return r; 11750 } 11751 11752 uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode) 11753 { 11754 /* Squash FZ16 to 0 for the duration of conversion. In this case, 11755 * it would affect flushing output denormals. 11756 */ 11757 float_status *fpst = fpstp; 11758 flag save = get_flush_to_zero(fpst); 11759 set_flush_to_zero(false, fpst); 11760 float16 r = float32_to_float16(a, !ahp_mode, fpst); 11761 set_flush_to_zero(save, fpst); 11762 return r; 11763 } 11764 11765 float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, void *fpstp, uint32_t ahp_mode) 11766 { 11767 /* Squash FZ16 to 0 for the duration of conversion. In this case, 11768 * it would affect flushing input denormals. 11769 */ 11770 float_status *fpst = fpstp; 11771 flag save = get_flush_inputs_to_zero(fpst); 11772 set_flush_inputs_to_zero(false, fpst); 11773 float64 r = float16_to_float64(a, !ahp_mode, fpst); 11774 set_flush_inputs_to_zero(save, fpst); 11775 return r; 11776 } 11777 11778 uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode) 11779 { 11780 /* Squash FZ16 to 0 for the duration of conversion. In this case, 11781 * it would affect flushing output denormals. 11782 */ 11783 float_status *fpst = fpstp; 11784 flag save = get_flush_to_zero(fpst); 11785 set_flush_to_zero(false, fpst); 11786 float16 r = float64_to_float16(a, !ahp_mode, fpst); 11787 set_flush_to_zero(save, fpst); 11788 return r; 11789 } 11790 11791 #define float32_two make_float32(0x40000000) 11792 #define float32_three make_float32(0x40400000) 11793 #define float32_one_point_five make_float32(0x3fc00000) 11794 11795 float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env) 11796 { 11797 float_status *s = &env->vfp.standard_fp_status; 11798 if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) || 11799 (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) { 11800 if (!(float32_is_zero(a) || float32_is_zero(b))) { 11801 float_raise(float_flag_input_denormal, s); 11802 } 11803 return float32_two; 11804 } 11805 return float32_sub(float32_two, float32_mul(a, b, s), s); 11806 } 11807 11808 float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env) 11809 { 11810 float_status *s = &env->vfp.standard_fp_status; 11811 float32 product; 11812 if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) || 11813 (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) { 11814 if (!(float32_is_zero(a) || float32_is_zero(b))) { 11815 float_raise(float_flag_input_denormal, s); 11816 } 11817 return float32_one_point_five; 11818 } 11819 product = float32_mul(a, b, s); 11820 return float32_div(float32_sub(float32_three, product, s), float32_two, s); 11821 } 11822 11823 /* NEON helpers. 
*/ 11824 11825 /* Constants 256 and 512 are used in some helpers; we avoid relying on 11826 * int->float conversions at run-time. */ 11827 #define float64_256 make_float64(0x4070000000000000LL) 11828 #define float64_512 make_float64(0x4080000000000000LL) 11829 #define float16_maxnorm make_float16(0x7bff) 11830 #define float32_maxnorm make_float32(0x7f7fffff) 11831 #define float64_maxnorm make_float64(0x7fefffffffffffffLL) 11832 11833 /* Reciprocal functions 11834 * 11835 * The algorithm that must be used to calculate the estimate 11836 * is specified by the ARM ARM, see FPRecipEstimate()/RecipEstimate 11837 */ 11838 11839 /* See RecipEstimate() 11840 * 11841 * input is a 9 bit fixed point number 11842 * input range 256 .. 511 for a number from 0.5 <= x < 1.0. 11843 * result range 256 .. 511 for a number from 1.0 to 511/256. 11844 */ 11845 11846 static int recip_estimate(int input) 11847 { 11848 int a, b, r; 11849 assert(256 <= input && input < 512); 11850 a = (input * 2) + 1; 11851 b = (1 << 19) / a; 11852 r = (b + 1) >> 1; 11853 assert(256 <= r && r < 512); 11854 return r; 11855 } 11856 11857 /* 11858 * Common wrapper to call recip_estimate 11859 * 11860 * The parameters are exponent and 64 bit fraction (without implicit 11861 * bit) where the binary point is nominally at bit 52. Returns a 11862 * float64 which can then be rounded to the appropriate size by the 11863 * callee. 11864 */ 11865 11866 static uint64_t call_recip_estimate(int *exp, int exp_off, uint64_t frac) 11867 { 11868 uint32_t scaled, estimate; 11869 uint64_t result_frac; 11870 int result_exp; 11871 11872 /* Handle sub-normals */ 11873 if (*exp == 0) { 11874 if (extract64(frac, 51, 1) == 0) { 11875 *exp = -1; 11876 frac <<= 2; 11877 } else { 11878 frac <<= 1; 11879 } 11880 } 11881 11882 /* scaled = UInt('1':fraction<51:44>) */ 11883 scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8)); 11884 estimate = recip_estimate(scaled); 11885 11886 result_exp = exp_off - *exp; 11887 result_frac = deposit64(0, 44, 8, estimate); 11888 if (result_exp == 0) { 11889 result_frac = deposit64(result_frac >> 1, 51, 1, 1); 11890 } else if (result_exp == -1) { 11891 result_frac = deposit64(result_frac >> 2, 50, 2, 1); 11892 result_exp = 0; 11893 } 11894 11895 *exp = result_exp; 11896 11897 return result_frac; 11898 } 11899 11900 static bool round_to_inf(float_status *fpst, bool sign_bit) 11901 { 11902 switch (fpst->float_rounding_mode) { 11903 case float_round_nearest_even: /* Round to Nearest */ 11904 return true; 11905 case float_round_up: /* Round to +Inf */ 11906 return !sign_bit; 11907 case float_round_down: /* Round to -Inf */ 11908 return sign_bit; 11909 case float_round_to_zero: /* Round to Zero */ 11910 return false; 11911 } 11912 11913 g_assert_not_reached(); 11914 } 11915 11916 uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp) 11917 { 11918 float_status *fpst = fpstp; 11919 float16 f16 = float16_squash_input_denormal(input, fpst); 11920 uint32_t f16_val = float16_val(f16); 11921 uint32_t f16_sign = float16_is_neg(f16); 11922 int f16_exp = extract32(f16_val, 10, 5); 11923 uint32_t f16_frac = extract32(f16_val, 0, 10); 11924 uint64_t f64_frac; 11925 11926 if (float16_is_any_nan(f16)) { 11927 float16 nan = f16; 11928 if (float16_is_signaling_nan(f16, fpst)) { 11929 float_raise(float_flag_invalid, fpst); 11930 nan = float16_silence_nan(f16, fpst); 11931 } 11932 if (fpst->default_nan_mode) { 11933 nan = float16_default_nan(fpst); 11934 } 11935 return nan; 11936 } else if (float16_is_infinity(f16)) { 11937 return 
float16_set_sign(float16_zero, float16_is_neg(f16)); 11938 } else if (float16_is_zero(f16)) { 11939 float_raise(float_flag_divbyzero, fpst); 11940 return float16_set_sign(float16_infinity, float16_is_neg(f16)); 11941 } else if (float16_abs(f16) < (1 << 8)) { 11942 /* Abs(value) < 2.0^-16 */ 11943 float_raise(float_flag_overflow | float_flag_inexact, fpst); 11944 if (round_to_inf(fpst, f16_sign)) { 11945 return float16_set_sign(float16_infinity, f16_sign); 11946 } else { 11947 return float16_set_sign(float16_maxnorm, f16_sign); 11948 } 11949 } else if (f16_exp >= 29 && fpst->flush_to_zero) { 11950 float_raise(float_flag_underflow, fpst); 11951 return float16_set_sign(float16_zero, float16_is_neg(f16)); 11952 } 11953 11954 f64_frac = call_recip_estimate(&f16_exp, 29, 11955 ((uint64_t) f16_frac) << (52 - 10)); 11956 11957 /* result = sign : result_exp<4:0> : fraction<51:42> */ 11958 f16_val = deposit32(0, 15, 1, f16_sign); 11959 f16_val = deposit32(f16_val, 10, 5, f16_exp); 11960 f16_val = deposit32(f16_val, 0, 10, extract64(f64_frac, 52 - 10, 10)); 11961 return make_float16(f16_val); 11962 } 11963 11964 float32 HELPER(recpe_f32)(float32 input, void *fpstp) 11965 { 11966 float_status *fpst = fpstp; 11967 float32 f32 = float32_squash_input_denormal(input, fpst); 11968 uint32_t f32_val = float32_val(f32); 11969 bool f32_sign = float32_is_neg(f32); 11970 int f32_exp = extract32(f32_val, 23, 8); 11971 uint32_t f32_frac = extract32(f32_val, 0, 23); 11972 uint64_t f64_frac; 11973 11974 if (float32_is_any_nan(f32)) { 11975 float32 nan = f32; 11976 if (float32_is_signaling_nan(f32, fpst)) { 11977 float_raise(float_flag_invalid, fpst); 11978 nan = float32_silence_nan(f32, fpst); 11979 } 11980 if (fpst->default_nan_mode) { 11981 nan = float32_default_nan(fpst); 11982 } 11983 return nan; 11984 } else if (float32_is_infinity(f32)) { 11985 return float32_set_sign(float32_zero, float32_is_neg(f32)); 11986 } else if (float32_is_zero(f32)) { 11987 float_raise(float_flag_divbyzero, fpst); 11988 return float32_set_sign(float32_infinity, float32_is_neg(f32)); 11989 } else if (float32_abs(f32) < (1ULL << 21)) { 11990 /* Abs(value) < 2.0^-128 */ 11991 float_raise(float_flag_overflow | float_flag_inexact, fpst); 11992 if (round_to_inf(fpst, f32_sign)) { 11993 return float32_set_sign(float32_infinity, f32_sign); 11994 } else { 11995 return float32_set_sign(float32_maxnorm, f32_sign); 11996 } 11997 } else if (f32_exp >= 253 && fpst->flush_to_zero) { 11998 float_raise(float_flag_underflow, fpst); 11999 return float32_set_sign(float32_zero, float32_is_neg(f32)); 12000 } 12001 12002 f64_frac = call_recip_estimate(&f32_exp, 253, 12003 ((uint64_t) f32_frac) << (52 - 23)); 12004 12005 /* result = sign : result_exp<7:0> : fraction<51:29> */ 12006 f32_val = deposit32(0, 31, 1, f32_sign); 12007 f32_val = deposit32(f32_val, 23, 8, f32_exp); 12008 f32_val = deposit32(f32_val, 0, 23, extract64(f64_frac, 52 - 23, 23)); 12009 return make_float32(f32_val); 12010 } 12011 12012 float64 HELPER(recpe_f64)(float64 input, void *fpstp) 12013 { 12014 float_status *fpst = fpstp; 12015 float64 f64 = float64_squash_input_denormal(input, fpst); 12016 uint64_t f64_val = float64_val(f64); 12017 bool f64_sign = float64_is_neg(f64); 12018 int f64_exp = extract64(f64_val, 52, 11); 12019 uint64_t f64_frac = extract64(f64_val, 0, 52); 12020 12021 /* Deal with any special cases */ 12022 if (float64_is_any_nan(f64)) { 12023 float64 nan = f64; 12024 if (float64_is_signaling_nan(f64, fpst)) { 12025 float_raise(float_flag_invalid, fpst); 12026 nan = 
float64_silence_nan(f64, fpst); 12027 } 12028 if (fpst->default_nan_mode) { 12029 nan = float64_default_nan(fpst); 12030 } 12031 return nan; 12032 } else if (float64_is_infinity(f64)) { 12033 return float64_set_sign(float64_zero, float64_is_neg(f64)); 12034 } else if (float64_is_zero(f64)) { 12035 float_raise(float_flag_divbyzero, fpst); 12036 return float64_set_sign(float64_infinity, float64_is_neg(f64)); 12037 } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) { 12038 /* Abs(value) < 2.0^-1024 */ 12039 float_raise(float_flag_overflow | float_flag_inexact, fpst); 12040 if (round_to_inf(fpst, f64_sign)) { 12041 return float64_set_sign(float64_infinity, f64_sign); 12042 } else { 12043 return float64_set_sign(float64_maxnorm, f64_sign); 12044 } 12045 } else if (f64_exp >= 2045 && fpst->flush_to_zero) { 12046 float_raise(float_flag_underflow, fpst); 12047 return float64_set_sign(float64_zero, float64_is_neg(f64)); 12048 } 12049 12050 f64_frac = call_recip_estimate(&f64_exp, 2045, f64_frac); 12051 12052 /* result = sign : result_exp<10:0> : fraction<51:0>; */ 12053 f64_val = deposit64(0, 63, 1, f64_sign); 12054 f64_val = deposit64(f64_val, 52, 11, f64_exp); 12055 f64_val = deposit64(f64_val, 0, 52, f64_frac); 12056 return make_float64(f64_val); 12057 } 12058 12059 /* The algorithm that must be used to calculate the estimate 12060 * is specified by the ARM ARM. 12061 */ 12062 12063 static int do_recip_sqrt_estimate(int a) 12064 { 12065 int b, estimate; 12066 12067 assert(128 <= a && a < 512); 12068 if (a < 256) { 12069 a = a * 2 + 1; 12070 } else { 12071 a = (a >> 1) << 1; 12072 a = (a + 1) * 2; 12073 } 12074 b = 512; 12075 while (a * (b + 1) * (b + 1) < (1 << 28)) { 12076 b += 1; 12077 } 12078 estimate = (b + 1) / 2; 12079 assert(256 <= estimate && estimate < 512); 12080 12081 return estimate; 12082 } 12083 12084 12085 static uint64_t recip_sqrt_estimate(int *exp , int exp_off, uint64_t frac) 12086 { 12087 int estimate; 12088 uint32_t scaled; 12089 12090 if (*exp == 0) { 12091 while (extract64(frac, 51, 1) == 0) { 12092 frac = frac << 1; 12093 *exp -= 1; 12094 } 12095 frac = extract64(frac, 0, 51) << 1; 12096 } 12097 12098 if (*exp & 1) { 12099 /* scaled = UInt('01':fraction<51:45>) */ 12100 scaled = deposit32(1 << 7, 0, 7, extract64(frac, 45, 7)); 12101 } else { 12102 /* scaled = UInt('1':fraction<51:44>) */ 12103 scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8)); 12104 } 12105 estimate = do_recip_sqrt_estimate(scaled); 12106 12107 *exp = (exp_off - *exp) / 2; 12108 return extract64(estimate, 0, 8) << 44; 12109 } 12110 12111 uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp) 12112 { 12113 float_status *s = fpstp; 12114 float16 f16 = float16_squash_input_denormal(input, s); 12115 uint16_t val = float16_val(f16); 12116 bool f16_sign = float16_is_neg(f16); 12117 int f16_exp = extract32(val, 10, 5); 12118 uint16_t f16_frac = extract32(val, 0, 10); 12119 uint64_t f64_frac; 12120 12121 if (float16_is_any_nan(f16)) { 12122 float16 nan = f16; 12123 if (float16_is_signaling_nan(f16, s)) { 12124 float_raise(float_flag_invalid, s); 12125 nan = float16_silence_nan(f16, s); 12126 } 12127 if (s->default_nan_mode) { 12128 nan = float16_default_nan(s); 12129 } 12130 return nan; 12131 } else if (float16_is_zero(f16)) { 12132 float_raise(float_flag_divbyzero, s); 12133 return float16_set_sign(float16_infinity, f16_sign); 12134 } else if (f16_sign) { 12135 float_raise(float_flag_invalid, s); 12136 return float16_default_nan(s); 12137 } else if (float16_is_infinity(f16)) { 12138 return 
float16_zero; 12139 } 12140 12141 /* Scale and normalize to a double-precision value between 0.25 and 1.0, 12142 * preserving the parity of the exponent. */ 12143 12144 f64_frac = ((uint64_t) f16_frac) << (52 - 10); 12145 12146 f64_frac = recip_sqrt_estimate(&f16_exp, 44, f64_frac); 12147 12148 /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(2) */ 12149 val = deposit32(0, 15, 1, f16_sign); 12150 val = deposit32(val, 10, 5, f16_exp); 12151 val = deposit32(val, 2, 8, extract64(f64_frac, 52 - 8, 8)); 12152 return make_float16(val); 12153 } 12154 12155 float32 HELPER(rsqrte_f32)(float32 input, void *fpstp) 12156 { 12157 float_status *s = fpstp; 12158 float32 f32 = float32_squash_input_denormal(input, s); 12159 uint32_t val = float32_val(f32); 12160 uint32_t f32_sign = float32_is_neg(f32); 12161 int f32_exp = extract32(val, 23, 8); 12162 uint32_t f32_frac = extract32(val, 0, 23); 12163 uint64_t f64_frac; 12164 12165 if (float32_is_any_nan(f32)) { 12166 float32 nan = f32; 12167 if (float32_is_signaling_nan(f32, s)) { 12168 float_raise(float_flag_invalid, s); 12169 nan = float32_silence_nan(f32, s); 12170 } 12171 if (s->default_nan_mode) { 12172 nan = float32_default_nan(s); 12173 } 12174 return nan; 12175 } else if (float32_is_zero(f32)) { 12176 float_raise(float_flag_divbyzero, s); 12177 return float32_set_sign(float32_infinity, float32_is_neg(f32)); 12178 } else if (float32_is_neg(f32)) { 12179 float_raise(float_flag_invalid, s); 12180 return float32_default_nan(s); 12181 } else if (float32_is_infinity(f32)) { 12182 return float32_zero; 12183 } 12184 12185 /* Scale and normalize to a double-precision value between 0.25 and 1.0, 12186 * preserving the parity of the exponent. */ 12187 12188 f64_frac = ((uint64_t) f32_frac) << 29; 12189 12190 f64_frac = recip_sqrt_estimate(&f32_exp, 380, f64_frac); 12191 12192 /* result = sign : result_exp<7:0> : estimate<7:0> : Zeros(15) */ 12193 val = deposit32(0, 31, 1, f32_sign); 12194 val = deposit32(val, 23, 8, f32_exp); 12195 val = deposit32(val, 15, 8, extract64(f64_frac, 52 - 8, 8)); 12196 return make_float32(val); 12197 } 12198 12199 float64 HELPER(rsqrte_f64)(float64 input, void *fpstp) 12200 { 12201 float_status *s = fpstp; 12202 float64 f64 = float64_squash_input_denormal(input, s); 12203 uint64_t val = float64_val(f64); 12204 bool f64_sign = float64_is_neg(f64); 12205 int f64_exp = extract64(val, 52, 11); 12206 uint64_t f64_frac = extract64(val, 0, 52); 12207 12208 if (float64_is_any_nan(f64)) { 12209 float64 nan = f64; 12210 if (float64_is_signaling_nan(f64, s)) { 12211 float_raise(float_flag_invalid, s); 12212 nan = float64_silence_nan(f64, s); 12213 } 12214 if (s->default_nan_mode) { 12215 nan = float64_default_nan(s); 12216 } 12217 return nan; 12218 } else if (float64_is_zero(f64)) { 12219 float_raise(float_flag_divbyzero, s); 12220 return float64_set_sign(float64_infinity, float64_is_neg(f64)); 12221 } else if (float64_is_neg(f64)) { 12222 float_raise(float_flag_invalid, s); 12223 return float64_default_nan(s); 12224 } else if (float64_is_infinity(f64)) { 12225 return float64_zero; 12226 } 12227 12228 f64_frac = recip_sqrt_estimate(&f64_exp, 3068, f64_frac); 12229 12230 /* result = sign : result_exp<10:0> : estimate<7:0> : Zeros(44) */ 12231 val = deposit64(0, 63, 1, f64_sign); /* sign is bit 63 of a float64 */ 12232 val = deposit64(val, 52, 11, f64_exp); 12233 val = deposit64(val, 44, 8, extract64(f64_frac, 52 - 8, 8)); 12234 return make_float64(val); 12235 } 12236 12237 uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp) 12238 { 12239 /* float_status *s = fpstp; */
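    /* Unsigned reciprocal estimate (URECPE / VRECPE.U32): the operand is
     * treated as an unsigned fixed-point value with the binary point after
     * the MSB.  Inputs below 0.5 (top bit clear) saturate to 0xffffffff;
     * otherwise bits <31:23> select the 256..511 estimate, which is returned
     * in bits <31:23> of the result with the low bits zero.
     */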
12240 int input, estimate; 12241 12242 if ((a & 0x80000000) == 0) { 12243 return 0xffffffff; 12244 } 12245 12246 input = extract32(a, 23, 9); 12247 estimate = recip_estimate(input); 12248 12249 return deposit32(0, (32 - 9), 9, estimate); 12250 } 12251 12252 uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp) 12253 { 12254 int estimate; 12255 12256 if ((a & 0xc0000000) == 0) { 12257 return 0xffffffff; 12258 } 12259 12260 estimate = do_recip_sqrt_estimate(extract32(a, 23, 9)); 12261 12262 return deposit32(0, 23, 9, estimate); 12263 } 12264 12265 /* VFPv4 fused multiply-accumulate */ 12266 float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp) 12267 { 12268 float_status *fpst = fpstp; 12269 return float32_muladd(a, b, c, 0, fpst); 12270 } 12271 12272 float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp) 12273 { 12274 float_status *fpst = fpstp; 12275 return float64_muladd(a, b, c, 0, fpst); 12276 } 12277 12278 /* ARMv8 round to integral */ 12279 float32 HELPER(rints_exact)(float32 x, void *fp_status) 12280 { 12281 return float32_round_to_int(x, fp_status); 12282 } 12283 12284 float64 HELPER(rintd_exact)(float64 x, void *fp_status) 12285 { 12286 return float64_round_to_int(x, fp_status); 12287 } 12288 12289 float32 HELPER(rints)(float32 x, void *fp_status) 12290 { 12291 int old_flags = get_float_exception_flags(fp_status), new_flags; 12292 float32 ret; 12293 12294 ret = float32_round_to_int(x, fp_status); 12295 12296 /* Suppress any inexact exceptions the conversion produced */ 12297 if (!(old_flags & float_flag_inexact)) { 12298 new_flags = get_float_exception_flags(fp_status); 12299 set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status); 12300 } 12301 12302 return ret; 12303 } 12304 12305 float64 HELPER(rintd)(float64 x, void *fp_status) 12306 { 12307 int old_flags = get_float_exception_flags(fp_status), new_flags; 12308 float64 ret; 12309 12310 ret = float64_round_to_int(x, fp_status); 12311 12312 12313 12314 /* Suppress any inexact exceptions the conversion produced */ 12315 if (!(old_flags & float_flag_inexact)) { 12316 new_flags = get_float_exception_flags(fp_status); 12317 set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status); 12318 } 12319 12320 return ret; 12321 } 12322 12323 /* Convert ARM rounding mode to softfloat */ 12324 int arm_rmode_to_sf(int rmode) 12325 { 12326 switch (rmode) { 12327 case FPROUNDING_TIEAWAY: 12328 rmode = float_round_ties_away; 12329 break; 12330 case FPROUNDING_ODD: 12331 /* FIXME: add support for ODD */ 12332 qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n", 12333 rmode); /* fall through to nearest-even */ 12334 case FPROUNDING_TIEEVEN: 12335 default: 12336 rmode = float_round_nearest_even; 12337 break; 12338 case FPROUNDING_POSINF: 12339 rmode = float_round_up; 12340 break; 12341 case FPROUNDING_NEGINF: 12342 rmode = float_round_down; 12343 break; 12344 case FPROUNDING_ZERO: 12345 rmode = float_round_to_zero; 12346 break; 12347 } 12348 return rmode; 12349 } 12350 12351 /* CRC helpers. 12352 * The upper bytes of val (above the number specified by 'bytes') must have 12353 * been zeroed out by the caller. 12354 */ 12355 uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes) 12356 { 12357 uint8_t buf[4]; 12358 12359 stl_le_p(buf, val); 12360 12361 /* zlib crc32 converts the accumulator and output to one's complement.
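 * The ARM CRC32 instructions operate on the raw accumulator, so we undo
 * both of zlib's inversions: XOR acc on the way in and XOR the result on
 * the way out.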
*/ 12362 return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff; 12363 } 12364 12365 uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes) 12366 { 12367 uint8_t buf[4]; 12368 12369 stl_le_p(buf, val); 12370 12371 /* Linux crc32c converts the output to one's complement. */ 12372 return crc32c(acc, buf, bytes) ^ 0xffffffff; 12373 } 12374 12375 /* Return the exception level to which FP-disabled exceptions should 12376 * be taken, or 0 if FP is enabled. 12377 */ 12378 static inline int fp_exception_el(CPUARMState *env) 12379 { 12380 #ifndef CONFIG_USER_ONLY 12381 int fpen; 12382 int cur_el = arm_current_el(env); 12383 12384 /* CPACR and the CPTR registers don't exist before v6, so FP is 12385 * always accessible 12386 */ 12387 if (!arm_feature(env, ARM_FEATURE_V6)) { 12388 return 0; 12389 } 12390 12391 /* The CPACR controls traps to EL1, or PL1 if we're 32 bit: 12392 * 0, 2 : trap EL0 and EL1/PL1 accesses 12393 * 1 : trap only EL0 accesses 12394 * 3 : trap no accesses 12395 */ 12396 fpen = extract32(env->cp15.cpacr_el1, 20, 2); 12397 switch (fpen) { 12398 case 0: 12399 case 2: 12400 if (cur_el == 0 || cur_el == 1) { 12401 /* Trap to PL1, which might be EL1 or EL3 */ 12402 if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { 12403 return 3; 12404 } 12405 return 1; 12406 } 12407 if (cur_el == 3 && !is_a64(env)) { 12408 /* Secure PL1 running at EL3 */ 12409 return 3; 12410 } 12411 break; 12412 case 1: 12413 if (cur_el == 0) { 12414 return 1; 12415 } 12416 break; 12417 case 3: 12418 break; 12419 } 12420 12421 /* For the CPTR registers we don't need to guard with an ARM_FEATURE 12422 * check because zero bits in the registers mean "don't trap". 12423 */ 12424 12425 /* CPTR_EL2 : present in v7VE or v8 */ 12426 if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1) 12427 && !arm_is_secure_below_el3(env)) { 12428 /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */ 12429 return 2; 12430 } 12431 12432 /* CPTR_EL3 : present in v8 */ 12433 if (extract32(env->cp15.cptr_el[3], 10, 1)) { 12434 /* Trap all FP ops to EL3 */ 12435 return 3; 12436 } 12437 #endif 12438 return 0; 12439 } 12440 12441 void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, 12442 target_ulong *cs_base, uint32_t *pflags) 12443 { 12444 ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false)); 12445 int fp_el = fp_exception_el(env); 12446 uint32_t flags; 12447 12448 if (is_a64(env)) { 12449 int sve_el = sve_exception_el(env); 12450 uint32_t zcr_len; 12451 12452 *pc = env->pc; 12453 flags = ARM_TBFLAG_AARCH64_STATE_MASK; 12454 /* Get control bits for tagged addresses */ 12455 flags |= (arm_regime_tbi0(env, mmu_idx) << ARM_TBFLAG_TBI0_SHIFT); 12456 flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT); 12457 flags |= sve_el << ARM_TBFLAG_SVEEXC_EL_SHIFT; 12458 12459 /* If SVE is disabled, but FP is enabled, 12460 then the effective len is 0. 
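 * Otherwise the effective length is the minimum of the ZCR_ELx.LEN fields
 * for every exception level that constrains the current one, capped by the
 * CPU's sve_max_vq (computed below).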
*/ 12461 if (sve_el != 0 && fp_el == 0) { 12462 zcr_len = 0; 12463 } else { 12464 int current_el = arm_current_el(env); 12465 ARMCPU *cpu = arm_env_get_cpu(env); 12466 12467 zcr_len = cpu->sve_max_vq - 1; 12468 if (current_el <= 1) { 12469 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]); 12470 } 12471 if (current_el < 2 && arm_feature(env, ARM_FEATURE_EL2)) { 12472 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]); 12473 } 12474 if (current_el < 3 && arm_feature(env, ARM_FEATURE_EL3)) { 12475 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]); 12476 } 12477 } 12478 flags |= zcr_len << ARM_TBFLAG_ZCR_LEN_SHIFT; 12479 } else { 12480 *pc = env->regs[15]; 12481 flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT) 12482 | (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT) 12483 | (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT) 12484 | (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT) 12485 | (arm_sctlr_b(env) << ARM_TBFLAG_SCTLR_B_SHIFT); 12486 if (!(access_secure_reg(env))) { 12487 flags |= ARM_TBFLAG_NS_MASK; 12488 } 12489 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30) 12490 || arm_el_is_aa64(env, 1)) { 12491 flags |= ARM_TBFLAG_VFPEN_MASK; 12492 } 12493 flags |= (extract32(env->cp15.c15_cpar, 0, 2) 12494 << ARM_TBFLAG_XSCALE_CPAR_SHIFT); 12495 } 12496 12497 flags |= (arm_to_core_mmu_idx(mmu_idx) << ARM_TBFLAG_MMUIDX_SHIFT); 12498 12499 /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine 12500 * states defined in the ARM ARM for software singlestep: 12501 * SS_ACTIVE PSTATE.SS State 12502 * 0 x Inactive (the TB flag for SS is always 0) 12503 * 1 0 Active-pending 12504 * 1 1 Active-not-pending 12505 */ 12506 if (arm_singlestep_active(env)) { 12507 flags |= ARM_TBFLAG_SS_ACTIVE_MASK; 12508 if (is_a64(env)) { 12509 if (env->pstate & PSTATE_SS) { 12510 flags |= ARM_TBFLAG_PSTATE_SS_MASK; 12511 } 12512 } else { 12513 if (env->uncached_cpsr & PSTATE_SS) { 12514 flags |= ARM_TBFLAG_PSTATE_SS_MASK; 12515 } 12516 } 12517 } 12518 if (arm_cpu_data_is_big_endian(env)) { 12519 flags |= ARM_TBFLAG_BE_DATA_MASK; 12520 } 12521 flags |= fp_el << ARM_TBFLAG_FPEXC_EL_SHIFT; 12522 12523 if (arm_v7m_is_handler_mode(env)) { 12524 flags |= ARM_TBFLAG_HANDLER_MASK; 12525 } 12526 12527 *pflags = flags; 12528 *cs_base = 0; 12529 } 12530