/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "target/arm/idau.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "hw/irq.h"
#include "hw/semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
#include "arm_ldst.h"
#include "exec/cpu_ldst.h"
#endif

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

#ifndef CONFIG_USER_ONLY

static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               bool s1_is_el0,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
#endif

static void switch_mode(CPUARMState *env, int mode);

static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;

    /* VFP data registers are always little-endian. */
    if (reg < nregs) {
        return gdb_get_reg64(buf, *aa32_vfp_dreg(env, reg));
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs. */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            return gdb_get_reg128(buf, q[0], q[1]);
        }
    }
    switch (reg - nregs) {
    case 0: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPSID]); break;
    case 1: return gdb_get_reg32(buf, vfp_get_fpscr(env)); break;
    case 2: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPEXC]); break;
    }
    return 0;
}

static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;

    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}

static int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register - quads are in LE order */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        return gdb_get_reg128(buf, q[1], q[0]);
    }
    case 32:
        /* FPSR */
        return gdb_get_reg32(buf, vfp_get_fpsr(env));
    case 33:
        /* FPCR */
        return gdb_get_reg32(buf, vfp_get_fpcr(env));
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        {
            uint64_t *q = aa64_vfp_qreg(env, reg);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}

static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

/**
 * arm_get/set_gdb_*: get/set a gdb register
 * @env: the CPU state
 * @buf: a buffer to copy to/from
 * @reg: register number (offset from start of group)
 *
 * We return the number of bytes copied
 */

static int arm_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri;
    uint32_t key;

    key = cpu->dyn_sysreg_xml.data.cpregs.keys[reg];
    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    if (ri) {
        if (cpreg_field_is_64bit(ri)) {
            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
        } else {
            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
        }
    }
    return 0;
}

static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    return 0;
}

#ifdef TARGET_AARCH64
static int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);

    switch (reg) {
    /* The first 32 registers are the zregs */
    case 0 ... 31:
    {
        int vq, len = 0;
        for (vq = 0; vq < cpu->sve_max_vq; vq++) {
            len += gdb_get_reg128(buf,
                                  env->vfp.zregs[reg].d[vq * 2 + 1],
                                  env->vfp.zregs[reg].d[vq * 2]);
        }
        return len;
    }
    case 32:
        return gdb_get_reg32(buf, vfp_get_fpsr(env));
    case 33:
        return gdb_get_reg32(buf, vfp_get_fpcr(env));
    /* then 16 predicates and the ffr */
    case 34 ... 50:
    {
        int preg = reg - 34;
        int vq, len = 0;
        for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
            len += gdb_get_reg64(buf, env->vfp.pregs[preg].p[vq / 4]);
        }
        return len;
    }
    case 51:
    {
        /*
         * We report in Vector Granules (VG) which is 64bit in a Z reg
         * while the ZCR works in Vector Quads (VQ) which is 128bit chunks.
         */
        int vq = sve_zcr_len_for_el(env, arm_current_el(env)) + 1;
        return gdb_get_reg32(buf, vq * 2);
    }
    default:
        /* gdbstub asked for something out of our range */
        qemu_log_mask(LOG_UNIMP, "%s: out of range register %d", __func__, reg);
        break;
    }

    return 0;
}

static int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);

    switch (reg) {
    /* The first 32 registers are the zregs */
    case 0 ... 31:
    {
        int vq, len = 0;
        uint64_t *p = (uint64_t *) buf;
        for (vq = 0; vq < cpu->sve_max_vq; vq++) {
            env->vfp.zregs[reg].d[vq * 2 + 1] = *p++;
            env->vfp.zregs[reg].d[vq * 2] = *p++;
            len += 16;
        }
        return len;
    }
    case 32:
        vfp_set_fpsr(env, *(uint32_t *)buf);
        return 4;
    case 33:
        vfp_set_fpcr(env, *(uint32_t *)buf);
        return 4;
    case 34 ... 50:
    {
        int preg = reg - 34;
        int vq, len = 0;
        uint64_t *p = (uint64_t *) buf;
        for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
            env->vfp.pregs[preg].p[vq / 4] = *p++;
            len += 8;
        }
        return len;
    }
    case 51:
        /* cannot set vg via gdbstub */
        return 0;
    default:
        /* gdbstub asked for something out of our range */
        break;
    }

    return 0;
}
#endif /* TARGET_AARCH64 */

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */
static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
        if (arm_hcr_el2_eff(env) & trap) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TSW. */
static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TACR. */
static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TTLB. */
static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return (env->cp15.hcr_el2 & HCR_FB) &&
        arm_current_el(env) == 1 && !arm_is_secure_below_el3(env);
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_E10_1 |
                        ARMMMUIdxBit_E10_1_PAN |
                        ARMMMUIdxBit_E10_0);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_E10_1 |
                                        ARMMMUIdxBit_E10_1_PAN |
                                        ARMMMUIdxBit_E10_0);
}


static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_E2);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }

    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
        value |= env->cp15.cpacr_el1 & (0xf << 20);
    }

    env->cp15.cpacr_el1 = value;
}

static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
    }
    return value;
}


static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
    REGINFO_SENTINEL
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writeable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
}

typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}

/*
 * Return the underlying cycle count for the PMU cycle counters. If we're in
 * usermode, simply return 0.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    return use_icount == 1 /* Precise instruction counting */;
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)cpu_get_icount_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return cpu_icount_to_ns((int64_t)icount);
}
#endif

static bool pmu_8_1_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.1 PMU */
    return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
}

static bool pmu_8_4_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.4 PMU */
    return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
}

static uint64_t zero_event_get_count(CPUARMState *env)
{
    /* For events which on QEMU never fire, their count is always zero */
    return 0;
}

static int64_t zero_event_ns_per(uint64_t cycles)
{
    /* An event which never fires can never overflow */
    return -1;
}

static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
    { .number = 0x023, /* STALL_FRONTEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x024, /* STALL_BACKEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x03c, /* STALL */
      .supported = pmu_8_4_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
};

/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x3c
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];

/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}

/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
            (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = env->cp15.mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    if (!secure) {
        if (el == 2 && (counter < hpmn || counter == 31)) {
            prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
        } else {
            prohibited = false;
        }
    } else {
        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
            (env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (prohibited && counter == 31) {
        prohibited = env->cp15.c9_pmcr & PMCRDP;
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p   = filter & PMXEVTYPER_P;
    u   = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m   = arm_el_is_aa64(env, 1) &&
          arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}

static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}

/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}

static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
    uint64_t count = 0;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}

static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = UINT32_MAX -
            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}

void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
    env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK);

    pmu_op_finish(env);
}

static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
    pmccntr_op_finish(env);
}

static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    pmccntr_op_start(env);
    /* M is not accessible from AArch32 */
    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
        (value & PMCCFILTR);
    pmccntr_op_finish(env);
}

static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* M is not visible in AArch32 */
    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
}

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
    pmu_update_irq(env);
}

static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr |= value;
    pmu_update_irq(env);
}

static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value, const uint8_t counter)
{
    if (counter == 31) {
        pmccfiltr_write(env, ri, value);
    } else if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    }
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
1849 */ 1850 } 1851 1852 static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri, 1853 const uint8_t counter) 1854 { 1855 if (counter == 31) { 1856 return env->cp15.pmccfiltr_el0; 1857 } else if (counter < pmu_num_counters(env)) { 1858 return env->cp15.c14_pmevtyper[counter]; 1859 } else { 1860 /* 1861 * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER 1862 * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write(). 1863 */ 1864 return 0; 1865 } 1866 } 1867 1868 static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri, 1869 uint64_t value) 1870 { 1871 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1872 pmevtyper_write(env, ri, value, counter); 1873 } 1874 1875 static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri, 1876 uint64_t value) 1877 { 1878 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1879 env->cp15.c14_pmevtyper[counter] = value; 1880 1881 /* 1882 * pmevtyper_rawwrite is called between a pair of pmu_op_start and 1883 * pmu_op_finish calls when loading saved state for a migration. Because 1884 * we're potentially updating the type of event here, the value written to 1885 * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a 1886 * different counter type. Therefore, we need to set this value to the 1887 * current count for the counter type we're writing so that pmu_op_finish 1888 * has the correct count for its calculation. 1889 */ 1890 uint16_t event = value & PMXEVTYPER_EVTCOUNT; 1891 if (event_supported(event)) { 1892 uint16_t event_idx = supported_event_map[event]; 1893 env->cp15.c14_pmevcntr_delta[counter] = 1894 pm_events[event_idx].get_count(env); 1895 } 1896 } 1897 1898 static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri) 1899 { 1900 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1901 return pmevtyper_read(env, ri, counter); 1902 } 1903 1904 static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri, 1905 uint64_t value) 1906 { 1907 pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31); 1908 } 1909 1910 static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri) 1911 { 1912 return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31); 1913 } 1914 1915 static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1916 uint64_t value, uint8_t counter) 1917 { 1918 if (counter < pmu_num_counters(env)) { 1919 pmevcntr_op_start(env, counter); 1920 env->cp15.c14_pmevcntr[counter] = value; 1921 pmevcntr_op_finish(env, counter); 1922 } 1923 /* 1924 * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR 1925 * are CONSTRAINED UNPREDICTABLE. 1926 */ 1927 } 1928 1929 static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri, 1930 uint8_t counter) 1931 { 1932 if (counter < pmu_num_counters(env)) { 1933 uint64_t ret; 1934 pmevcntr_op_start(env, counter); 1935 ret = env->cp15.c14_pmevcntr[counter]; 1936 pmevcntr_op_finish(env, counter); 1937 return ret; 1938 } else { 1939 /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR 1940 * are CONSTRAINED UNPREDICTABLE.
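 * (The per-counter PMEVCNTR<n>/PMEVTYPER<n> wrappers below recover n from
 * the register encoding as ((crm & 3) << 3) | (opc2 & 7), since CRm[1:0]
 * carries n[4:3] and opc2 carries n[2:0] in those encodings.)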
*/ 1941 return 0; 1942 } 1943 } 1944 1945 static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri, 1946 uint64_t value) 1947 { 1948 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1949 pmevcntr_write(env, ri, value, counter); 1950 } 1951 1952 static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri) 1953 { 1954 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1955 return pmevcntr_read(env, ri, counter); 1956 } 1957 1958 static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri, 1959 uint64_t value) 1960 { 1961 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1962 assert(counter < pmu_num_counters(env)); 1963 env->cp15.c14_pmevcntr[counter] = value; 1964 pmevcntr_write(env, ri, value, counter); 1965 } 1966 1967 static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri) 1968 { 1969 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1970 assert(counter < pmu_num_counters(env)); 1971 return env->cp15.c14_pmevcntr[counter]; 1972 } 1973 1974 static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1975 uint64_t value) 1976 { 1977 pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31); 1978 } 1979 1980 static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1981 { 1982 return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31); 1983 } 1984 1985 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1986 uint64_t value) 1987 { 1988 if (arm_feature(env, ARM_FEATURE_V8)) { 1989 env->cp15.c9_pmuserenr = value & 0xf; 1990 } else { 1991 env->cp15.c9_pmuserenr = value & 1; 1992 } 1993 } 1994 1995 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri, 1996 uint64_t value) 1997 { 1998 /* Only bits for counters which are actually implemented can be set */ 1999 value &= pmu_counter_mask(env); 2000 env->cp15.c9_pminten |= value; 2001 pmu_update_irq(env); 2002 } 2003 2004 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2005 uint64_t value) 2006 { 2007 value &= pmu_counter_mask(env); 2008 env->cp15.c9_pminten &= ~value; 2009 pmu_update_irq(env); 2010 } 2011 2012 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri, 2013 uint64_t value) 2014 { 2015 /* Note that even though the AArch64 view of this register has bits 2016 * [10:0] all RES0 we can only mask the bottom 5, to comply with the 2017 * architectural requirements for bits which are RES0 only in some 2018 * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7 2019 * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.) 2020 */ 2021 raw_write(env, ri, value & ~0x1FULL); 2022 } 2023 2024 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 2025 { 2026 /* Begin with base v8.0 state. */ 2027 uint32_t valid_mask = 0x3fff; 2028 ARMCPU *cpu = env_archcpu(env); 2029 2030 if (arm_el_is_aa64(env, 3)) { 2031 value |= SCR_FW | SCR_AW; /* these two bits are RES1. */ 2032 valid_mask &= ~SCR_NET; 2033 } else { 2034 valid_mask &= ~(SCR_RW | SCR_ST); 2035 } 2036 2037 if (!arm_feature(env, ARM_FEATURE_EL2)) { 2038 valid_mask &= ~SCR_HCE; 2039 2040 /* On ARMv7, SMD (or SCD as it is called in v7) is only 2041 * supported if EL2 exists. The bit is UNK/SBZP when 2042 * EL2 is unavailable. In QEMU ARMv7, we force it to always be zero 2043 * when EL2 is unavailable. 2044 * On ARMv8, this bit is always available.
2045 */ 2046 if (arm_feature(env, ARM_FEATURE_V7) && 2047 !arm_feature(env, ARM_FEATURE_V8)) { 2048 valid_mask &= ~SCR_SMD; 2049 } 2050 } 2051 if (cpu_isar_feature(aa64_lor, cpu)) { 2052 valid_mask |= SCR_TLOR; 2053 } 2054 if (cpu_isar_feature(aa64_pauth, cpu)) { 2055 valid_mask |= SCR_API | SCR_APK; 2056 } 2057 2058 /* Clear all-context RES0 bits. */ 2059 value &= valid_mask; 2060 raw_write(env, ri, value); 2061 } 2062 2063 static CPAccessResult access_aa64_tid2(CPUARMState *env, 2064 const ARMCPRegInfo *ri, 2065 bool isread) 2066 { 2067 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) { 2068 return CP_ACCESS_TRAP_EL2; 2069 } 2070 2071 return CP_ACCESS_OK; 2072 } 2073 2074 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2075 { 2076 ARMCPU *cpu = env_archcpu(env); 2077 2078 /* Acquire the CSSELR index from the bank corresponding to the CCSIDR 2079 * bank 2080 */ 2081 uint32_t index = A32_BANKED_REG_GET(env, csselr, 2082 ri->secure & ARM_CP_SECSTATE_S); 2083 2084 return cpu->ccsidr[index]; 2085 } 2086 2087 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2088 uint64_t value) 2089 { 2090 raw_write(env, ri, value & 0xf); 2091 } 2092 2093 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2094 { 2095 CPUState *cs = env_cpu(env); 2096 uint64_t hcr_el2 = arm_hcr_el2_eff(env); 2097 uint64_t ret = 0; 2098 bool allow_virt = (arm_current_el(env) == 1 && 2099 (!arm_is_secure_below_el3(env) || 2100 (env->cp15.scr_el3 & SCR_EEL2))); 2101 2102 if (allow_virt && (hcr_el2 & HCR_IMO)) { 2103 if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) { 2104 ret |= CPSR_I; 2105 } 2106 } else { 2107 if (cs->interrupt_request & CPU_INTERRUPT_HARD) { 2108 ret |= CPSR_I; 2109 } 2110 } 2111 2112 if (allow_virt && (hcr_el2 & HCR_FMO)) { 2113 if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) { 2114 ret |= CPSR_F; 2115 } 2116 } else { 2117 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) { 2118 ret |= CPSR_F; 2119 } 2120 } 2121 2122 /* External aborts are not possible in QEMU so A bit is always clear */ 2123 return ret; 2124 } 2125 2126 static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri, 2127 bool isread) 2128 { 2129 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) { 2130 return CP_ACCESS_TRAP_EL2; 2131 } 2132 2133 return CP_ACCESS_OK; 2134 } 2135 2136 static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri, 2137 bool isread) 2138 { 2139 if (arm_feature(env, ARM_FEATURE_V8)) { 2140 return access_aa64_tid1(env, ri, isread); 2141 } 2142 2143 return CP_ACCESS_OK; 2144 } 2145 2146 static const ARMCPRegInfo v7_cp_reginfo[] = { 2147 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */ 2148 { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, 2149 .access = PL1_W, .type = ARM_CP_NOP }, 2150 /* Performance monitors are implementation defined in v7, 2151 * but with an ARM recommended set of registers, which we 2152 * follow. 2153 * 2154 * Performance registers fall into three categories: 2155 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR) 2156 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR) 2157 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others) 2158 * For the cases controlled by PMUSERENR we must set .access to PL0_RW 2159 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn. 
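 * (For example, PMCNTENSET below is declared PL0_RW with
 * .accessfn = pmreg_access; pmreg_access, defined earlier in this file,
 * is what rejects EL0 accesses while PMUSERENR.EN is 0.)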
2160 */ 2161 { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1, 2162 .access = PL0_RW, .type = ARM_CP_ALIAS, 2163 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), 2164 .writefn = pmcntenset_write, 2165 .accessfn = pmreg_access, 2166 .raw_writefn = raw_write }, 2167 { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, 2168 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1, 2169 .access = PL0_RW, .accessfn = pmreg_access, 2170 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0, 2171 .writefn = pmcntenset_write, .raw_writefn = raw_write }, 2172 { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2, 2173 .access = PL0_RW, 2174 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), 2175 .accessfn = pmreg_access, 2176 .writefn = pmcntenclr_write, 2177 .type = ARM_CP_ALIAS }, 2178 { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64, 2179 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2, 2180 .access = PL0_RW, .accessfn = pmreg_access, 2181 .type = ARM_CP_ALIAS, 2182 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), 2183 .writefn = pmcntenclr_write }, 2184 { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3, 2185 .access = PL0_RW, .type = ARM_CP_IO, 2186 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), 2187 .accessfn = pmreg_access, 2188 .writefn = pmovsr_write, 2189 .raw_writefn = raw_write }, 2190 { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64, 2191 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3, 2192 .access = PL0_RW, .accessfn = pmreg_access, 2193 .type = ARM_CP_ALIAS | ARM_CP_IO, 2194 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), 2195 .writefn = pmovsr_write, 2196 .raw_writefn = raw_write }, 2197 { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4, 2198 .access = PL0_W, .accessfn = pmreg_access_swinc, 2199 .type = ARM_CP_NO_RAW | ARM_CP_IO, 2200 .writefn = pmswinc_write }, 2201 { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64, 2202 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4, 2203 .access = PL0_W, .accessfn = pmreg_access_swinc, 2204 .type = ARM_CP_NO_RAW | ARM_CP_IO, 2205 .writefn = pmswinc_write }, 2206 { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5, 2207 .access = PL0_RW, .type = ARM_CP_ALIAS, 2208 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr), 2209 .accessfn = pmreg_access_selr, .writefn = pmselr_write, 2210 .raw_writefn = raw_write}, 2211 { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64, 2212 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5, 2213 .access = PL0_RW, .accessfn = pmreg_access_selr, 2214 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr), 2215 .writefn = pmselr_write, .raw_writefn = raw_write, }, 2216 { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0, 2217 .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO, 2218 .readfn = pmccntr_read, .writefn = pmccntr_write32, 2219 .accessfn = pmreg_access_ccntr }, 2220 { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64, 2221 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0, 2222 .access = PL0_RW, .accessfn = pmreg_access_ccntr, 2223 .type = ARM_CP_IO, 2224 .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt), 2225 .readfn = pmccntr_read, .writefn = pmccntr_write, 2226 .raw_readfn = raw_read, .raw_writefn = raw_write, }, 2227 { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7, 2228 .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32, 
2229 .access = PL0_RW, .accessfn = pmreg_access, 2230 .type = ARM_CP_ALIAS | ARM_CP_IO, 2231 .resetvalue = 0, }, 2232 { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64, 2233 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7, 2234 .writefn = pmccfiltr_write, .raw_writefn = raw_write, 2235 .access = PL0_RW, .accessfn = pmreg_access, 2236 .type = ARM_CP_IO, 2237 .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0), 2238 .resetvalue = 0, }, 2239 { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1, 2240 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2241 .accessfn = pmreg_access, 2242 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, 2243 { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64, 2244 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1, 2245 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2246 .accessfn = pmreg_access, 2247 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, 2248 { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2, 2249 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2250 .accessfn = pmreg_access_xevcntr, 2251 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, 2252 { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64, 2253 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2, 2254 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2255 .accessfn = pmreg_access_xevcntr, 2256 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, 2257 { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0, 2258 .access = PL0_R | PL1_RW, .accessfn = access_tpm, 2259 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr), 2260 .resetvalue = 0, 2261 .writefn = pmuserenr_write, .raw_writefn = raw_write }, 2262 { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64, 2263 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0, 2264 .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS, 2265 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr), 2266 .resetvalue = 0, 2267 .writefn = pmuserenr_write, .raw_writefn = raw_write }, 2268 { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1, 2269 .access = PL1_RW, .accessfn = access_tpm, 2270 .type = ARM_CP_ALIAS | ARM_CP_IO, 2271 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten), 2272 .resetvalue = 0, 2273 .writefn = pmintenset_write, .raw_writefn = raw_write }, 2274 { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64, 2275 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1, 2276 .access = PL1_RW, .accessfn = access_tpm, 2277 .type = ARM_CP_IO, 2278 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2279 .writefn = pmintenset_write, .raw_writefn = raw_write, 2280 .resetvalue = 0x0 }, 2281 { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2, 2282 .access = PL1_RW, .accessfn = access_tpm, 2283 .type = ARM_CP_ALIAS | ARM_CP_IO, 2284 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2285 .writefn = pmintenclr_write, }, 2286 { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64, 2287 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2, 2288 .access = PL1_RW, .accessfn = access_tpm, 2289 .type = ARM_CP_ALIAS | ARM_CP_IO, 2290 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2291 .writefn = pmintenclr_write }, 2292 { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH, 2293 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0, 2294 .access = PL1_R, 2295 .accessfn = access_aa64_tid2, 2296 .readfn = ccsidr_read, .type = ARM_CP_NO_RAW }, 2297 { 
.name = "CSSELR", .state = ARM_CP_STATE_BOTH, 2298 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0, 2299 .access = PL1_RW, 2300 .accessfn = access_aa64_tid2, 2301 .writefn = csselr_write, .resetvalue = 0, 2302 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s), 2303 offsetof(CPUARMState, cp15.csselr_ns) } }, 2304 /* Auxiliary ID register: this actually has an IMPDEF value but for now 2305 * just RAZ for all cores: 2306 */ 2307 { .name = "AIDR", .state = ARM_CP_STATE_BOTH, 2308 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7, 2309 .access = PL1_R, .type = ARM_CP_CONST, 2310 .accessfn = access_aa64_tid1, 2311 .resetvalue = 0 }, 2312 /* Auxiliary fault status registers: these also are IMPDEF, and we 2313 * choose to RAZ/WI for all cores. 2314 */ 2315 { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH, 2316 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0, 2317 .access = PL1_RW, .accessfn = access_tvm_trvm, 2318 .type = ARM_CP_CONST, .resetvalue = 0 }, 2319 { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH, 2320 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1, 2321 .access = PL1_RW, .accessfn = access_tvm_trvm, 2322 .type = ARM_CP_CONST, .resetvalue = 0 }, 2323 /* MAIR can just read-as-written because we don't implement caches 2324 * and so don't need to care about memory attributes. 2325 */ 2326 { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64, 2327 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, 2328 .access = PL1_RW, .accessfn = access_tvm_trvm, 2329 .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]), 2330 .resetvalue = 0 }, 2331 { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64, 2332 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0, 2333 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]), 2334 .resetvalue = 0 }, 2335 /* For non-long-descriptor page tables these are PRRR and NMRR; 2336 * regardless they still act as reads-as-written for QEMU. 2337 */ 2338 /* MAIR0/1 are defined separately from their 64-bit counterpart which 2339 * allows them to assign the correct fieldoffset based on the endianness 2340 * handled in the field definitions. 
2341 */ 2342 { .name = "MAIR0", .state = ARM_CP_STATE_AA32, 2343 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, 2344 .access = PL1_RW, .accessfn = access_tvm_trvm, 2345 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s), 2346 offsetof(CPUARMState, cp15.mair0_ns) }, 2347 .resetfn = arm_cp_reset_ignore }, 2348 { .name = "MAIR1", .state = ARM_CP_STATE_AA32, 2349 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, 2350 .access = PL1_RW, .accessfn = access_tvm_trvm, 2351 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s), 2352 offsetof(CPUARMState, cp15.mair1_ns) }, 2353 .resetfn = arm_cp_reset_ignore }, 2354 { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH, 2355 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0, 2356 .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read }, 2357 /* 32 bit ITLB invalidates */ 2358 { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0, 2359 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2360 .writefn = tlbiall_write }, 2361 { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1, 2362 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2363 .writefn = tlbimva_write }, 2364 { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2, 2365 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2366 .writefn = tlbiasid_write }, 2367 /* 32 bit DTLB invalidates */ 2368 { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0, 2369 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2370 .writefn = tlbiall_write }, 2371 { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1, 2372 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2373 .writefn = tlbimva_write }, 2374 { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2, 2375 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2376 .writefn = tlbiasid_write }, 2377 /* 32 bit TLB invalidates */ 2378 { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, 2379 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2380 .writefn = tlbiall_write }, 2381 { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, 2382 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2383 .writefn = tlbimva_write }, 2384 { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, 2385 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2386 .writefn = tlbiasid_write }, 2387 { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, 2388 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2389 .writefn = tlbimvaa_write }, 2390 REGINFO_SENTINEL 2391 }; 2392 2393 static const ARMCPRegInfo v7mp_cp_reginfo[] = { 2394 /* 32 bit TLB invalidates, Inner Shareable */ 2395 { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, 2396 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2397 .writefn = tlbiall_is_write }, 2398 { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, 2399 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2400 .writefn = tlbimva_is_write }, 2401 { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, 2402 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2403 .writefn = tlbiasid_is_write }, 2404 { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, 2405 .type = ARM_CP_NO_RAW, .access 
= PL1_W, .accessfn = access_ttlb, 2406 .writefn = tlbimvaa_is_write }, 2407 REGINFO_SENTINEL 2408 }; 2409 2410 static const ARMCPRegInfo pmovsset_cp_reginfo[] = { 2411 /* PMOVSSET is not implemented in v7 before v7ve */ 2412 { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3, 2413 .access = PL0_RW, .accessfn = pmreg_access, 2414 .type = ARM_CP_ALIAS | ARM_CP_IO, 2415 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), 2416 .writefn = pmovsset_write, 2417 .raw_writefn = raw_write }, 2418 { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64, 2419 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3, 2420 .access = PL0_RW, .accessfn = pmreg_access, 2421 .type = ARM_CP_ALIAS | ARM_CP_IO, 2422 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), 2423 .writefn = pmovsset_write, 2424 .raw_writefn = raw_write }, 2425 REGINFO_SENTINEL 2426 }; 2427 2428 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2429 uint64_t value) 2430 { 2431 value &= 1; 2432 env->teecr = value; 2433 } 2434 2435 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri, 2436 bool isread) 2437 { 2438 if (arm_current_el(env) == 0 && (env->teecr & 1)) { 2439 return CP_ACCESS_TRAP; 2440 } 2441 return CP_ACCESS_OK; 2442 } 2443 2444 static const ARMCPRegInfo t2ee_cp_reginfo[] = { 2445 { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0, 2446 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr), 2447 .resetvalue = 0, 2448 .writefn = teecr_write }, 2449 { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0, 2450 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr), 2451 .accessfn = teehbr_access, .resetvalue = 0 }, 2452 REGINFO_SENTINEL 2453 }; 2454 2455 static const ARMCPRegInfo v6k_cp_reginfo[] = { 2456 { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64, 2457 .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0, 2458 .access = PL0_RW, 2459 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 }, 2460 { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2, 2461 .access = PL0_RW, 2462 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s), 2463 offsetoflow32(CPUARMState, cp15.tpidrurw_ns) }, 2464 .resetfn = arm_cp_reset_ignore }, 2465 { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64, 2466 .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0, 2467 .access = PL0_R|PL1_W, 2468 .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]), 2469 .resetvalue = 0}, 2470 { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3, 2471 .access = PL0_R|PL1_W, 2472 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s), 2473 offsetoflow32(CPUARMState, cp15.tpidruro_ns) }, 2474 .resetfn = arm_cp_reset_ignore }, 2475 { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64, 2476 .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0, 2477 .access = PL1_RW, 2478 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 }, 2479 { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4, 2480 .access = PL1_RW, 2481 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s), 2482 offsetoflow32(CPUARMState, cp15.tpidrprw_ns) }, 2483 .resetvalue = 0 }, 2484 REGINFO_SENTINEL 2485 }; 2486 2487 #ifndef CONFIG_USER_ONLY 2488 2489 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri, 2490 bool isread) 2491 { 2492 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero. 
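 * (PL0PCTEN and PL0VCTEN are CNTKCTL bits [0] and [1], which is what the
 * extract32(cntkctl, 0, 2) test in the EL0 case below is checking.)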
2493 * Writable only at the highest implemented exception level. 2494 */ 2495 int el = arm_current_el(env); 2496 uint64_t hcr; 2497 uint32_t cntkctl; 2498 2499 switch (el) { 2500 case 0: 2501 hcr = arm_hcr_el2_eff(env); 2502 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2503 cntkctl = env->cp15.cnthctl_el2; 2504 } else { 2505 cntkctl = env->cp15.c14_cntkctl; 2506 } 2507 if (!extract32(cntkctl, 0, 2)) { 2508 return CP_ACCESS_TRAP; 2509 } 2510 break; 2511 case 1: 2512 if (!isread && ri->state == ARM_CP_STATE_AA32 && 2513 arm_is_secure_below_el3(env)) { 2514 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */ 2515 return CP_ACCESS_TRAP_UNCATEGORIZED; 2516 } 2517 break; 2518 case 2: 2519 case 3: 2520 break; 2521 } 2522 2523 if (!isread && el < arm_highest_el(env)) { 2524 return CP_ACCESS_TRAP_UNCATEGORIZED; 2525 } 2526 2527 return CP_ACCESS_OK; 2528 } 2529 2530 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx, 2531 bool isread) 2532 { 2533 unsigned int cur_el = arm_current_el(env); 2534 bool secure = arm_is_secure(env); 2535 uint64_t hcr = arm_hcr_el2_eff(env); 2536 2537 switch (cur_el) { 2538 case 0: 2539 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */ 2540 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2541 return (extract32(env->cp15.cnthctl_el2, timeridx, 1) 2542 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2); 2543 } 2544 2545 /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */ 2546 if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) { 2547 return CP_ACCESS_TRAP; 2548 } 2549 2550 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */ 2551 if (hcr & HCR_E2H) { 2552 if (timeridx == GTIMER_PHYS && 2553 !extract32(env->cp15.cnthctl_el2, 10, 1)) { 2554 return CP_ACCESS_TRAP_EL2; 2555 } 2556 } else { 2557 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */ 2558 if (arm_feature(env, ARM_FEATURE_EL2) && 2559 timeridx == GTIMER_PHYS && !secure && 2560 !extract32(env->cp15.cnthctl_el2, 1, 1)) { 2561 return CP_ACCESS_TRAP_EL2; 2562 } 2563 } 2564 break; 2565 2566 case 1: 2567 /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */ 2568 if (arm_feature(env, ARM_FEATURE_EL2) && 2569 timeridx == GTIMER_PHYS && !secure && 2570 (hcr & HCR_E2H 2571 ? !extract32(env->cp15.cnthctl_el2, 10, 1) 2572 : !extract32(env->cp15.cnthctl_el2, 0, 1))) { 2573 return CP_ACCESS_TRAP_EL2; 2574 } 2575 break; 2576 } 2577 return CP_ACCESS_OK; 2578 } 2579 2580 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx, 2581 bool isread) 2582 { 2583 unsigned int cur_el = arm_current_el(env); 2584 bool secure = arm_is_secure(env); 2585 uint64_t hcr = arm_hcr_el2_eff(env); 2586 2587 switch (cur_el) { 2588 case 0: 2589 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2590 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */ 2591 return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1) 2592 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2); 2593 } 2594 2595 /* 2596 * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from 2597 * EL0 if EL0[PV]TEN is zero. 2598 */ 2599 if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) { 2600 return CP_ACCESS_TRAP; 2601 } 2602 /* fall through */ 2603 2604 case 1: 2605 if (arm_feature(env, ARM_FEATURE_EL2) && 2606 timeridx == GTIMER_PHYS && !secure) { 2607 if (hcr & HCR_E2H) { 2608 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. 
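 * (When E2H is in effect, EL1PTEN is CNTHCTL_EL2 bit 11, hence the
 * extract32(..., 11, 1) immediately below.)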
*/ 2609 if (!extract32(env->cp15.cnthctl_el2, 11, 1)) { 2610 return CP_ACCESS_TRAP_EL2; 2611 } 2612 } else { 2613 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */ 2614 if (!extract32(env->cp15.cnthctl_el2, 1, 1)) { 2615 return CP_ACCESS_TRAP_EL2; 2616 } 2617 } 2618 } 2619 break; 2620 } 2621 return CP_ACCESS_OK; 2622 } 2623 2624 static CPAccessResult gt_pct_access(CPUARMState *env, 2625 const ARMCPRegInfo *ri, 2626 bool isread) 2627 { 2628 return gt_counter_access(env, GTIMER_PHYS, isread); 2629 } 2630 2631 static CPAccessResult gt_vct_access(CPUARMState *env, 2632 const ARMCPRegInfo *ri, 2633 bool isread) 2634 { 2635 return gt_counter_access(env, GTIMER_VIRT, isread); 2636 } 2637 2638 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri, 2639 bool isread) 2640 { 2641 return gt_timer_access(env, GTIMER_PHYS, isread); 2642 } 2643 2644 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri, 2645 bool isread) 2646 { 2647 return gt_timer_access(env, GTIMER_VIRT, isread); 2648 } 2649 2650 static CPAccessResult gt_stimer_access(CPUARMState *env, 2651 const ARMCPRegInfo *ri, 2652 bool isread) 2653 { 2654 /* The AArch64 register view of the secure physical timer is 2655 * always accessible from EL3, and configurably accessible from 2656 * Secure EL1. 2657 */ 2658 switch (arm_current_el(env)) { 2659 case 1: 2660 if (!arm_is_secure(env)) { 2661 return CP_ACCESS_TRAP; 2662 } 2663 if (!(env->cp15.scr_el3 & SCR_ST)) { 2664 return CP_ACCESS_TRAP_EL3; 2665 } 2666 return CP_ACCESS_OK; 2667 case 0: 2668 case 2: 2669 return CP_ACCESS_TRAP; 2670 case 3: 2671 return CP_ACCESS_OK; 2672 default: 2673 g_assert_not_reached(); 2674 } 2675 } 2676 2677 static uint64_t gt_get_countervalue(CPUARMState *env) 2678 { 2679 ARMCPU *cpu = env_archcpu(env); 2680 2681 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu); 2682 } 2683 2684 static void gt_recalc_timer(ARMCPU *cpu, int timeridx) 2685 { 2686 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx]; 2687 2688 if (gt->ctl & 1) { 2689 /* Timer enabled: calculate and set current ISTATUS, irq, and 2690 * reset timer to when ISTATUS next has to change 2691 */ 2692 uint64_t offset = timeridx == GTIMER_VIRT ? 2693 cpu->env.cp15.cntvoff_el2 : 0; 2694 uint64_t count = gt_get_countervalue(&cpu->env); 2695 /* Note that this must be unsigned 64 bit arithmetic: */ 2696 int istatus = count - offset >= gt->cval; 2697 uint64_t nexttick; 2698 int irqstate; 2699 2700 gt->ctl = deposit32(gt->ctl, 2, 1, istatus); 2701 2702 irqstate = (istatus && !(gt->ctl & 2)); 2703 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); 2704 2705 if (istatus) { 2706 /* Next transition is when count rolls back over to zero */ 2707 nexttick = UINT64_MAX; 2708 } else { 2709 /* Next transition is when we hit cval */ 2710 nexttick = gt->cval + offset; 2711 } 2712 /* Note that the desired next expiry time might be beyond the 2713 * signed-64-bit range of a QEMUTimer -- in this case we just 2714 * set the timer for as far in the future as possible. When the 2715 * timer expires we will reset the timer for any remaining period. 
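 * As an illustration (assuming the default 62.5MHz frequency, i.e. a
 * 16ns tick from GTIMER_SCALE): INT64_MAX ns of virtual time is roughly
 * 292 years, so an expiry further out than about 2^59 ticks is clamped
 * to INT64_MAX ns here and recalculated when that timer fires.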
2716 */ 2717 if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) { 2718 timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX); 2719 } else { 2720 timer_mod(cpu->gt_timer[timeridx], nexttick); 2721 } 2722 trace_arm_gt_recalc(timeridx, irqstate, nexttick); 2723 } else { 2724 /* Timer disabled: ISTATUS and timer output always clear */ 2725 gt->ctl &= ~4; 2726 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0); 2727 timer_del(cpu->gt_timer[timeridx]); 2728 trace_arm_gt_recalc_disabled(timeridx); 2729 } 2730 } 2731 2732 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri, 2733 int timeridx) 2734 { 2735 ARMCPU *cpu = env_archcpu(env); 2736 2737 timer_del(cpu->gt_timer[timeridx]); 2738 } 2739 2740 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 2741 { 2742 return gt_get_countervalue(env); 2743 } 2744 2745 static uint64_t gt_virt_cnt_offset(CPUARMState *env) 2746 { 2747 uint64_t hcr; 2748 2749 switch (arm_current_el(env)) { 2750 case 2: 2751 hcr = arm_hcr_el2_eff(env); 2752 if (hcr & HCR_E2H) { 2753 return 0; 2754 } 2755 break; 2756 case 0: 2757 hcr = arm_hcr_el2_eff(env); 2758 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2759 return 0; 2760 } 2761 break; 2762 } 2763 2764 return env->cp15.cntvoff_el2; 2765 } 2766 2767 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 2768 { 2769 return gt_get_countervalue(env) - gt_virt_cnt_offset(env); 2770 } 2771 2772 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2773 int timeridx, 2774 uint64_t value) 2775 { 2776 trace_arm_gt_cval_write(timeridx, value); 2777 env->cp15.c14_timer[timeridx].cval = value; 2778 gt_recalc_timer(env_archcpu(env), timeridx); 2779 } 2780 2781 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri, 2782 int timeridx) 2783 { 2784 uint64_t offset = 0; 2785 2786 switch (timeridx) { 2787 case GTIMER_VIRT: 2788 case GTIMER_HYPVIRT: 2789 offset = gt_virt_cnt_offset(env); 2790 break; 2791 } 2792 2793 return (uint32_t)(env->cp15.c14_timer[timeridx].cval - 2794 (gt_get_countervalue(env) - offset)); 2795 } 2796 2797 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2798 int timeridx, 2799 uint64_t value) 2800 { 2801 uint64_t offset = 0; 2802 2803 switch (timeridx) { 2804 case GTIMER_VIRT: 2805 case GTIMER_HYPVIRT: 2806 offset = gt_virt_cnt_offset(env); 2807 break; 2808 } 2809 2810 trace_arm_gt_tval_write(timeridx, value); 2811 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset + 2812 sextract64(value, 0, 32); 2813 gt_recalc_timer(env_archcpu(env), timeridx); 2814 } 2815 2816 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2817 int timeridx, 2818 uint64_t value) 2819 { 2820 ARMCPU *cpu = env_archcpu(env); 2821 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; 2822 2823 trace_arm_gt_ctl_write(timeridx, value); 2824 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value); 2825 if ((oldval ^ value) & 1) { 2826 /* Enable toggled */ 2827 gt_recalc_timer(cpu, timeridx); 2828 } else if ((oldval ^ value) & 2) { 2829 /* IMASK toggled: don't need to recalculate, 2830 * just set the interrupt line based on ISTATUS 2831 */ 2832 int irqstate = (oldval & 4) && !(value & 2); 2833 2834 trace_arm_gt_imask_toggle(timeridx, irqstate); 2835 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); 2836 } 2837 } 2838 2839 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2840 { 2841 gt_timer_reset(env, ri, GTIMER_PHYS); 2842 } 2843 2844 static void 
gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2845 uint64_t value) 2846 { 2847 gt_cval_write(env, ri, GTIMER_PHYS, value); 2848 } 2849 2850 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2851 { 2852 return gt_tval_read(env, ri, GTIMER_PHYS); 2853 } 2854 2855 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2856 uint64_t value) 2857 { 2858 gt_tval_write(env, ri, GTIMER_PHYS, value); 2859 } 2860 2861 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2862 uint64_t value) 2863 { 2864 gt_ctl_write(env, ri, GTIMER_PHYS, value); 2865 } 2866 2867 static int gt_phys_redir_timeridx(CPUARMState *env) 2868 { 2869 switch (arm_mmu_idx(env)) { 2870 case ARMMMUIdx_E20_0: 2871 case ARMMMUIdx_E20_2: 2872 case ARMMMUIdx_E20_2_PAN: 2873 return GTIMER_HYP; 2874 default: 2875 return GTIMER_PHYS; 2876 } 2877 } 2878 2879 static int gt_virt_redir_timeridx(CPUARMState *env) 2880 { 2881 switch (arm_mmu_idx(env)) { 2882 case ARMMMUIdx_E20_0: 2883 case ARMMMUIdx_E20_2: 2884 case ARMMMUIdx_E20_2_PAN: 2885 return GTIMER_HYPVIRT; 2886 default: 2887 return GTIMER_VIRT; 2888 } 2889 } 2890 2891 static uint64_t gt_phys_redir_cval_read(CPUARMState *env, 2892 const ARMCPRegInfo *ri) 2893 { 2894 int timeridx = gt_phys_redir_timeridx(env); 2895 return env->cp15.c14_timer[timeridx].cval; 2896 } 2897 2898 static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2899 uint64_t value) 2900 { 2901 int timeridx = gt_phys_redir_timeridx(env); 2902 gt_cval_write(env, ri, timeridx, value); 2903 } 2904 2905 static uint64_t gt_phys_redir_tval_read(CPUARMState *env, 2906 const ARMCPRegInfo *ri) 2907 { 2908 int timeridx = gt_phys_redir_timeridx(env); 2909 return gt_tval_read(env, ri, timeridx); 2910 } 2911 2912 static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2913 uint64_t value) 2914 { 2915 int timeridx = gt_phys_redir_timeridx(env); 2916 gt_tval_write(env, ri, timeridx, value); 2917 } 2918 2919 static uint64_t gt_phys_redir_ctl_read(CPUARMState *env, 2920 const ARMCPRegInfo *ri) 2921 { 2922 int timeridx = gt_phys_redir_timeridx(env); 2923 return env->cp15.c14_timer[timeridx].ctl; 2924 } 2925 2926 static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2927 uint64_t value) 2928 { 2929 int timeridx = gt_phys_redir_timeridx(env); 2930 gt_ctl_write(env, ri, timeridx, value); 2931 } 2932 2933 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2934 { 2935 gt_timer_reset(env, ri, GTIMER_VIRT); 2936 } 2937 2938 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2939 uint64_t value) 2940 { 2941 gt_cval_write(env, ri, GTIMER_VIRT, value); 2942 } 2943 2944 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2945 { 2946 return gt_tval_read(env, ri, GTIMER_VIRT); 2947 } 2948 2949 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2950 uint64_t value) 2951 { 2952 gt_tval_write(env, ri, GTIMER_VIRT, value); 2953 } 2954 2955 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2956 uint64_t value) 2957 { 2958 gt_ctl_write(env, ri, GTIMER_VIRT, value); 2959 } 2960 2961 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri, 2962 uint64_t value) 2963 { 2964 ARMCPU *cpu = env_archcpu(env); 2965 2966 trace_arm_gt_cntvoff_write(value); 2967 raw_write(env, ri, value); 2968 gt_recalc_timer(cpu, GTIMER_VIRT); 2969 } 2970 2971 static uint64_t gt_virt_redir_cval_read(CPUARMState 
*env, 2972 const ARMCPRegInfo *ri) 2973 { 2974 int timeridx = gt_virt_redir_timeridx(env); 2975 return env->cp15.c14_timer[timeridx].cval; 2976 } 2977 2978 static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2979 uint64_t value) 2980 { 2981 int timeridx = gt_virt_redir_timeridx(env); 2982 gt_cval_write(env, ri, timeridx, value); 2983 } 2984 2985 static uint64_t gt_virt_redir_tval_read(CPUARMState *env, 2986 const ARMCPRegInfo *ri) 2987 { 2988 int timeridx = gt_virt_redir_timeridx(env); 2989 return gt_tval_read(env, ri, timeridx); 2990 } 2991 2992 static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2993 uint64_t value) 2994 { 2995 int timeridx = gt_virt_redir_timeridx(env); 2996 gt_tval_write(env, ri, timeridx, value); 2997 } 2998 2999 static uint64_t gt_virt_redir_ctl_read(CPUARMState *env, 3000 const ARMCPRegInfo *ri) 3001 { 3002 int timeridx = gt_virt_redir_timeridx(env); 3003 return env->cp15.c14_timer[timeridx].ctl; 3004 } 3005 3006 static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 3007 uint64_t value) 3008 { 3009 int timeridx = gt_virt_redir_timeridx(env); 3010 gt_ctl_write(env, ri, timeridx, value); 3011 } 3012 3013 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 3014 { 3015 gt_timer_reset(env, ri, GTIMER_HYP); 3016 } 3017 3018 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 3019 uint64_t value) 3020 { 3021 gt_cval_write(env, ri, GTIMER_HYP, value); 3022 } 3023 3024 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 3025 { 3026 return gt_tval_read(env, ri, GTIMER_HYP); 3027 } 3028 3029 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 3030 uint64_t value) 3031 { 3032 gt_tval_write(env, ri, GTIMER_HYP, value); 3033 } 3034 3035 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 3036 uint64_t value) 3037 { 3038 gt_ctl_write(env, ri, GTIMER_HYP, value); 3039 } 3040 3041 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 3042 { 3043 gt_timer_reset(env, ri, GTIMER_SEC); 3044 } 3045 3046 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 3047 uint64_t value) 3048 { 3049 gt_cval_write(env, ri, GTIMER_SEC, value); 3050 } 3051 3052 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 3053 { 3054 return gt_tval_read(env, ri, GTIMER_SEC); 3055 } 3056 3057 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 3058 uint64_t value) 3059 { 3060 gt_tval_write(env, ri, GTIMER_SEC, value); 3061 } 3062 3063 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 3064 uint64_t value) 3065 { 3066 gt_ctl_write(env, ri, GTIMER_SEC, value); 3067 } 3068 3069 static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 3070 { 3071 gt_timer_reset(env, ri, GTIMER_HYPVIRT); 3072 } 3073 3074 static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 3075 uint64_t value) 3076 { 3077 gt_cval_write(env, ri, GTIMER_HYPVIRT, value); 3078 } 3079 3080 static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 3081 { 3082 return gt_tval_read(env, ri, GTIMER_HYPVIRT); 3083 } 3084 3085 static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 3086 uint64_t value) 3087 { 3088 gt_tval_write(env, ri, GTIMER_HYPVIRT, value); 3089 } 3090 3091 static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 3092 uint64_t value) 3093 { 3094 gt_ctl_write(env, ri, GTIMER_HYPVIRT, value); 
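    /*
     * (The GTIMER_HYPVIRT timer written here is the EL2 virtual timer,
     * CNTHV_*, introduced by VHE; when the CPU is running in the EL2&0
     * regime (HCR_EL2.E2H set), the gt_virt_redir_* accessors above route
     * EL0's CNTV_* registers to it as well.)
     */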
3095 } 3096 3097 void arm_gt_ptimer_cb(void *opaque) 3098 { 3099 ARMCPU *cpu = opaque; 3100 3101 gt_recalc_timer(cpu, GTIMER_PHYS); 3102 } 3103 3104 void arm_gt_vtimer_cb(void *opaque) 3105 { 3106 ARMCPU *cpu = opaque; 3107 3108 gt_recalc_timer(cpu, GTIMER_VIRT); 3109 } 3110 3111 void arm_gt_htimer_cb(void *opaque) 3112 { 3113 ARMCPU *cpu = opaque; 3114 3115 gt_recalc_timer(cpu, GTIMER_HYP); 3116 } 3117 3118 void arm_gt_stimer_cb(void *opaque) 3119 { 3120 ARMCPU *cpu = opaque; 3121 3122 gt_recalc_timer(cpu, GTIMER_SEC); 3123 } 3124 3125 void arm_gt_hvtimer_cb(void *opaque) 3126 { 3127 ARMCPU *cpu = opaque; 3128 3129 gt_recalc_timer(cpu, GTIMER_HYPVIRT); 3130 } 3131 3132 static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque) 3133 { 3134 ARMCPU *cpu = env_archcpu(env); 3135 3136 cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz; 3137 } 3138 3139 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 3140 /* Note that CNTFRQ is purely reads-as-written for the benefit 3141 * of software; writing it doesn't actually change the timer frequency. 3142 * Our reset value matches the fixed frequency we implement the timer at. 3143 */ 3144 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0, 3145 .type = ARM_CP_ALIAS, 3146 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 3147 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq), 3148 }, 3149 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, 3150 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, 3151 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 3152 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), 3153 .resetfn = arm_gt_cntfrq_reset, 3154 }, 3155 /* overall control: mostly access permissions */ 3156 { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH, 3157 .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0, 3158 .access = PL1_RW, 3159 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl), 3160 .resetvalue = 0, 3161 }, 3162 /* per-timer control */ 3163 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 3164 .secure = ARM_CP_SECSTATE_NS, 3165 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, 3166 .accessfn = gt_ptimer_access, 3167 .fieldoffset = offsetoflow32(CPUARMState, 3168 cp15.c14_timer[GTIMER_PHYS].ctl), 3169 .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read, 3170 .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write, 3171 }, 3172 { .name = "CNTP_CTL_S", 3173 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 3174 .secure = ARM_CP_SECSTATE_S, 3175 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, 3176 .accessfn = gt_ptimer_access, 3177 .fieldoffset = offsetoflow32(CPUARMState, 3178 cp15.c14_timer[GTIMER_SEC].ctl), 3179 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 3180 }, 3181 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64, 3182 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1, 3183 .type = ARM_CP_IO, .access = PL0_RW, 3184 .accessfn = gt_ptimer_access, 3185 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), 3186 .resetvalue = 0, 3187 .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read, 3188 .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write, 3189 }, 3190 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1, 3191 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, 3192 .accessfn = gt_vtimer_access, 3193 .fieldoffset = offsetoflow32(CPUARMState, 3194 cp15.c14_timer[GTIMER_VIRT].ctl), 3195 .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read, 3196 .writefn 
= gt_virt_redir_ctl_write, .raw_writefn = raw_write, 3197 }, 3198 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64, 3199 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1, 3200 .type = ARM_CP_IO, .access = PL0_RW, 3201 .accessfn = gt_vtimer_access, 3202 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), 3203 .resetvalue = 0, 3204 .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read, 3205 .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write, 3206 }, 3207 /* TimerValue views: a 32 bit downcounting view of the underlying state */ 3208 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 3209 .secure = ARM_CP_SECSTATE_NS, 3210 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3211 .accessfn = gt_ptimer_access, 3212 .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write, 3213 }, 3214 { .name = "CNTP_TVAL_S", 3215 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 3216 .secure = ARM_CP_SECSTATE_S, 3217 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3218 .accessfn = gt_ptimer_access, 3219 .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write, 3220 }, 3221 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64, 3222 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0, 3223 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3224 .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset, 3225 .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write, 3226 }, 3227 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0, 3228 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3229 .accessfn = gt_vtimer_access, 3230 .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write, 3231 }, 3232 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64, 3233 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0, 3234 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3235 .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset, 3236 .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write, 3237 }, 3238 /* The counter itself */ 3239 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0, 3240 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 3241 .accessfn = gt_pct_access, 3242 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore, 3243 }, 3244 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64, 3245 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1, 3246 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 3247 .accessfn = gt_pct_access, .readfn = gt_cnt_read, 3248 }, 3249 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1, 3250 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 3251 .accessfn = gt_vct_access, 3252 .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore, 3253 }, 3254 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, 3255 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, 3256 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 3257 .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read, 3258 }, 3259 /* Comparison value, indicating when the timer goes off */ 3260 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2, 3261 .secure = ARM_CP_SECSTATE_NS, 3262 .access = PL0_RW, 3263 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 3264 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 3265 .accessfn = gt_ptimer_access, 3266 .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read, 3267 .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write, 3268 }, 3269 { .name = 
"CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2, 3270 .secure = ARM_CP_SECSTATE_S, 3271 .access = PL0_RW, 3272 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 3273 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), 3274 .accessfn = gt_ptimer_access, 3275 .writefn = gt_sec_cval_write, .raw_writefn = raw_write, 3276 }, 3277 { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64, 3278 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2, 3279 .access = PL0_RW, 3280 .type = ARM_CP_IO, 3281 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 3282 .resetvalue = 0, .accessfn = gt_ptimer_access, 3283 .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read, 3284 .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write, 3285 }, 3286 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3, 3287 .access = PL0_RW, 3288 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 3289 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 3290 .accessfn = gt_vtimer_access, 3291 .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read, 3292 .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write, 3293 }, 3294 { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64, 3295 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2, 3296 .access = PL0_RW, 3297 .type = ARM_CP_IO, 3298 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 3299 .resetvalue = 0, .accessfn = gt_vtimer_access, 3300 .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read, 3301 .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write, 3302 }, 3303 /* Secure timer -- this is actually restricted to only EL3 3304 * and configurably Secure-EL1 via the accessfn. 3305 */ 3306 { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64, 3307 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0, 3308 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW, 3309 .accessfn = gt_stimer_access, 3310 .readfn = gt_sec_tval_read, 3311 .writefn = gt_sec_tval_write, 3312 .resetfn = gt_sec_timer_reset, 3313 }, 3314 { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64, 3315 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1, 3316 .type = ARM_CP_IO, .access = PL1_RW, 3317 .accessfn = gt_stimer_access, 3318 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl), 3319 .resetvalue = 0, 3320 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 3321 }, 3322 { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64, 3323 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2, 3324 .type = ARM_CP_IO, .access = PL1_RW, 3325 .accessfn = gt_stimer_access, 3326 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), 3327 .writefn = gt_sec_cval_write, .raw_writefn = raw_write, 3328 }, 3329 REGINFO_SENTINEL 3330 }; 3331 3332 static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri, 3333 bool isread) 3334 { 3335 if (!(arm_hcr_el2_eff(env) & HCR_E2H)) { 3336 return CP_ACCESS_TRAP; 3337 } 3338 return CP_ACCESS_OK; 3339 } 3340 3341 #else 3342 3343 /* In user-mode most of the generic timer registers are inaccessible 3344 * however modern kernels (4.12+) allow access to cntvct_el0 3345 */ 3346 3347 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 3348 { 3349 ARMCPU *cpu = env_archcpu(env); 3350 3351 /* Currently we have no support for QEMUTimer in linux-user so we 3352 * can't call gt_get_countervalue(env), instead we directly 3353 * call the lower level functions. 
3354 */ 3355 return cpu_get_clock() / gt_cntfrq_period_ns(cpu); 3356 } 3357 3358 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 3359 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, 3360 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, 3361 .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */, 3362 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), 3363 .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE, 3364 }, 3365 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, 3366 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, 3367 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 3368 .readfn = gt_virt_cnt_read, 3369 }, 3370 REGINFO_SENTINEL 3371 }; 3372 3373 #endif 3374 3375 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 3376 { 3377 if (arm_feature(env, ARM_FEATURE_LPAE)) { 3378 raw_write(env, ri, value); 3379 } else if (arm_feature(env, ARM_FEATURE_V7)) { 3380 raw_write(env, ri, value & 0xfffff6ff); 3381 } else { 3382 raw_write(env, ri, value & 0xfffff1ff); 3383 } 3384 } 3385 3386 #ifndef CONFIG_USER_ONLY 3387 /* get_phys_addr() isn't present for user-mode-only targets */ 3388 3389 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri, 3390 bool isread) 3391 { 3392 if (ri->opc2 & 4) { 3393 /* The ATS12NSO* operations must trap to EL3 if executed in 3394 * Secure EL1 (which can only happen if EL3 is AArch64). 3395 * They are simply UNDEF if executed from NS EL1. 3396 * They function normally from EL2 or EL3. 3397 */ 3398 if (arm_current_el(env) == 1) { 3399 if (arm_is_secure_below_el3(env)) { 3400 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3; 3401 } 3402 return CP_ACCESS_TRAP_UNCATEGORIZED; 3403 } 3404 } 3405 return CP_ACCESS_OK; 3406 } 3407 3408 #ifdef CONFIG_TCG 3409 static uint64_t do_ats_write(CPUARMState *env, uint64_t value, 3410 MMUAccessType access_type, ARMMMUIdx mmu_idx) 3411 { 3412 hwaddr phys_addr; 3413 target_ulong page_size; 3414 int prot; 3415 bool ret; 3416 uint64_t par64; 3417 bool format64 = false; 3418 MemTxAttrs attrs = {}; 3419 ARMMMUFaultInfo fi = {}; 3420 ARMCacheAttrs cacheattrs = {}; 3421 3422 ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs, 3423 &prot, &page_size, &fi, &cacheattrs); 3424 3425 if (ret) { 3426 /* 3427 * Some kinds of translation fault must cause exceptions rather 3428 * than being reported in the PAR. 3429 */ 3430 int current_el = arm_current_el(env); 3431 int target_el; 3432 uint32_t syn, fsr, fsc; 3433 bool take_exc = false; 3434 3435 if (fi.s1ptw && current_el == 1 && !arm_is_secure(env) 3436 && arm_mmu_idx_is_stage1_of_2(mmu_idx)) { 3437 /* 3438 * Synchronous stage 2 fault on an access made as part of the 3439 * translation table walk for AT S1E0* or AT S1E1* insn 3440 * executed from NS EL1. If this is a synchronous external abort 3441 * and SCR_EL3.EA == 1, then we take a synchronous external abort 3442 * to EL3. Otherwise the fault is taken as an exception to EL2, 3443 * and HPFAR_EL2 holds the faulting IPA. 3444 */ 3445 if (fi.type == ARMFault_SyncExternalOnWalk && 3446 (env->cp15.scr_el3 & SCR_EA)) { 3447 target_el = 3; 3448 } else { 3449 env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4; 3450 target_el = 2; 3451 } 3452 take_exc = true; 3453 } else if (fi.type == ARMFault_SyncExternalOnWalk) { 3454 /* 3455 * Synchronous external aborts during a translation table walk 3456 * are taken as Data Abort exceptions. 
3457 */ 3458 if (fi.stage2) { 3459 if (current_el == 3) { 3460 target_el = 3; 3461 } else { 3462 target_el = 2; 3463 } 3464 } else { 3465 target_el = exception_target_el(env); 3466 } 3467 take_exc = true; 3468 } 3469 3470 if (take_exc) { 3471 /* Construct FSR and FSC using same logic as arm_deliver_fault() */ 3472 if (target_el == 2 || arm_el_is_aa64(env, target_el) || 3473 arm_s1_regime_using_lpae_format(env, mmu_idx)) { 3474 fsr = arm_fi_to_lfsc(&fi); 3475 fsc = extract32(fsr, 0, 6); 3476 } else { 3477 fsr = arm_fi_to_sfsc(&fi); 3478 fsc = 0x3f; 3479 } 3480 /* 3481 * Report exception with ESR indicating a fault due to a 3482 * translation table walk for a cache maintenance instruction. 3483 */ 3484 syn = syn_data_abort_no_iss(current_el == target_el, 3485 fi.ea, 1, fi.s1ptw, 1, fsc); 3486 env->exception.vaddress = value; 3487 env->exception.fsr = fsr; 3488 raise_exception(env, EXCP_DATA_ABORT, syn, target_el); 3489 } 3490 } 3491 3492 if (is_a64(env)) { 3493 format64 = true; 3494 } else if (arm_feature(env, ARM_FEATURE_LPAE)) { 3495 /* 3496 * ATS1Cxx: 3497 * * TTBCR.EAE determines whether the result is returned using the 3498 * 32-bit or the 64-bit PAR format 3499 * * Instructions executed in Hyp mode always use the 64bit format 3500 * 3501 * ATS1S2NSOxx uses the 64bit format if any of the following is true: 3502 * * The Non-secure TTBCR.EAE bit is set to 1 3503 * * The implementation includes EL2, and the value of HCR.VM is 1 3504 * 3505 * (Note that HCR.DC makes HCR.VM behave as if it is 1.) 3506 * 3507 * ATS1Hx always uses the 64bit format. 3508 */ 3509 format64 = arm_s1_regime_using_lpae_format(env, mmu_idx); 3510 3511 if (arm_feature(env, ARM_FEATURE_EL2)) { 3512 if (mmu_idx == ARMMMUIdx_E10_0 || 3513 mmu_idx == ARMMMUIdx_E10_1 || 3514 mmu_idx == ARMMMUIdx_E10_1_PAN) { 3515 format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC); 3516 } else { 3517 format64 |= arm_current_el(env) == 2; 3518 } 3519 } 3520 } 3521 3522 if (format64) { 3523 /* Create a 64-bit PAR */ 3524 par64 = (1 << 11); /* LPAE bit always set */ 3525 if (!ret) { 3526 par64 |= phys_addr & ~0xfffULL; 3527 if (!attrs.secure) { 3528 par64 |= (1 << 9); /* NS */ 3529 } 3530 par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */ 3531 par64 |= cacheattrs.shareability << 7; /* SH */ 3532 } else { 3533 uint32_t fsr = arm_fi_to_lfsc(&fi); 3534 3535 par64 |= 1; /* F */ 3536 par64 |= (fsr & 0x3f) << 1; /* FS */ 3537 if (fi.stage2) { 3538 par64 |= (1 << 9); /* S */ 3539 } 3540 if (fi.s1ptw) { 3541 par64 |= (1 << 8); /* PTW */ 3542 } 3543 } 3544 } else { 3545 /* fsr is a DFSR/IFSR value for the short descriptor 3546 * translation table format (with WnR always clear). 3547 * Convert it to a 32-bit PAR. 3548 */ 3549 if (!ret) { 3550 /* We do not set any attribute bits in the PAR */ 3551 if (page_size == (1 << 24) 3552 && arm_feature(env, ARM_FEATURE_V7)) { 3553 par64 = (phys_addr & 0xff000000) | (1 << 1); 3554 } else { 3555 par64 = phys_addr & 0xfffff000; 3556 } 3557 if (!attrs.secure) { 3558 par64 |= (1 << 9); /* NS */ 3559 } 3560 } else { 3561 uint32_t fsr = arm_fi_to_sfsc(&fi); 3562 3563 par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) | 3564 ((fsr & 0xf) << 1) | 1; 3565 } 3566 } 3567 return par64; 3568 } 3569 #endif /* CONFIG_TCG */ 3570 3571 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 3572 { 3573 #ifdef CONFIG_TCG 3574 MMUAccessType access_type = ri->opc2 & 1 ? 
MMU_DATA_STORE : MMU_DATA_LOAD; 3575 uint64_t par64; 3576 ARMMMUIdx mmu_idx; 3577 int el = arm_current_el(env); 3578 bool secure = arm_is_secure_below_el3(env); 3579 3580 switch (ri->opc2 & 6) { 3581 case 0: 3582 /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */ 3583 switch (el) { 3584 case 3: 3585 mmu_idx = ARMMMUIdx_SE3; 3586 break; 3587 case 2: 3588 g_assert(!secure); /* TODO: ARMv8.4-SecEL2 */ 3589 /* fall through */ 3590 case 1: 3591 if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) { 3592 mmu_idx = (secure ? ARMMMUIdx_SE10_1_PAN 3593 : ARMMMUIdx_Stage1_E1_PAN); 3594 } else { 3595 mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1; 3596 } 3597 break; 3598 default: 3599 g_assert_not_reached(); 3600 } 3601 break; 3602 case 2: 3603 /* stage 1 current state PL0: ATS1CUR, ATS1CUW */ 3604 switch (el) { 3605 case 3: 3606 mmu_idx = ARMMMUIdx_SE10_0; 3607 break; 3608 case 2: 3609 mmu_idx = ARMMMUIdx_Stage1_E0; 3610 break; 3611 case 1: 3612 mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_Stage1_E0; 3613 break; 3614 default: 3615 g_assert_not_reached(); 3616 } 3617 break; 3618 case 4: 3619 /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */ 3620 mmu_idx = ARMMMUIdx_E10_1; 3621 break; 3622 case 6: 3623 /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */ 3624 mmu_idx = ARMMMUIdx_E10_0; 3625 break; 3626 default: 3627 g_assert_not_reached(); 3628 } 3629 3630 par64 = do_ats_write(env, value, access_type, mmu_idx); 3631 3632 A32_BANKED_CURRENT_REG_SET(env, par, par64); 3633 #else 3634 /* Handled by hardware accelerator. */ 3635 g_assert_not_reached(); 3636 #endif /* CONFIG_TCG */ 3637 } 3638 3639 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri, 3640 uint64_t value) 3641 { 3642 #ifdef CONFIG_TCG 3643 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 3644 uint64_t par64; 3645 3646 par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2); 3647 3648 A32_BANKED_CURRENT_REG_SET(env, par, par64); 3649 #else 3650 /* Handled by hardware accelerator. */ 3651 g_assert_not_reached(); 3652 #endif /* CONFIG_TCG */ 3653 } 3654 3655 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri, 3656 bool isread) 3657 { 3658 if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) { 3659 return CP_ACCESS_TRAP; 3660 } 3661 return CP_ACCESS_OK; 3662 } 3663 3664 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, 3665 uint64_t value) 3666 { 3667 #ifdef CONFIG_TCG 3668 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 3669 ARMMMUIdx mmu_idx; 3670 int secure = arm_is_secure_below_el3(env); 3671 3672 switch (ri->opc2 & 6) { 3673 case 0: 3674 switch (ri->opc1) { 3675 case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */ 3676 if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) { 3677 mmu_idx = (secure ? ARMMMUIdx_SE10_1_PAN 3678 : ARMMMUIdx_Stage1_E1_PAN); 3679 } else { 3680 mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1; 3681 } 3682 break; 3683 case 4: /* AT S1E2R, AT S1E2W */ 3684 mmu_idx = ARMMMUIdx_E2; 3685 break; 3686 case 6: /* AT S1E3R, AT S1E3W */ 3687 mmu_idx = ARMMMUIdx_SE3; 3688 break; 3689 default: 3690 g_assert_not_reached(); 3691 } 3692 break; 3693 case 2: /* AT S1E0R, AT S1E0W */ 3694 mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_Stage1_E0; 3695 break; 3696 case 4: /* AT S12E1R, AT S12E1W */ 3697 mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1; 3698 break; 3699 case 6: /* AT S12E0R, AT S12E0W */ 3700 mmu_idx = secure ? 
ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0; 3701 break; 3702 default: 3703 g_assert_not_reached(); 3704 } 3705 3706 env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx); 3707 #else 3708 /* Handled by hardware accelerator. */ 3709 g_assert_not_reached(); 3710 #endif /* CONFIG_TCG */ 3711 } 3712 #endif 3713 3714 static const ARMCPRegInfo vapa_cp_reginfo[] = { 3715 { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0, 3716 .access = PL1_RW, .resetvalue = 0, 3717 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s), 3718 offsetoflow32(CPUARMState, cp15.par_ns) }, 3719 .writefn = par_write }, 3720 #ifndef CONFIG_USER_ONLY 3721 /* This underdecoding is safe because the reginfo is NO_RAW. */ 3722 { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY, 3723 .access = PL1_W, .accessfn = ats_access, 3724 .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, 3725 #endif 3726 REGINFO_SENTINEL 3727 }; 3728 3729 /* Return basic MPU access permission bits. */ 3730 static uint32_t simple_mpu_ap_bits(uint32_t val) 3731 { 3732 uint32_t ret; 3733 uint32_t mask; 3734 int i; 3735 ret = 0; 3736 mask = 3; 3737 for (i = 0; i < 16; i += 2) { 3738 ret |= (val >> i) & mask; 3739 mask <<= 2; 3740 } 3741 return ret; 3742 } 3743 3744 /* Pad basic MPU access permission bits to extended format. */ 3745 static uint32_t extended_mpu_ap_bits(uint32_t val) 3746 { 3747 uint32_t ret; 3748 uint32_t mask; 3749 int i; 3750 ret = 0; 3751 mask = 3; 3752 for (i = 0; i < 16; i += 2) { 3753 ret |= (val & mask) << i; 3754 mask <<= 2; 3755 } 3756 return ret; 3757 } 3758 3759 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 3760 uint64_t value) 3761 { 3762 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value); 3763 } 3764 3765 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 3766 { 3767 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap); 3768 } 3769 3770 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 3771 uint64_t value) 3772 { 3773 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value); 3774 } 3775 3776 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 3777 { 3778 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap); 3779 } 3780 3781 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri) 3782 { 3783 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 3784 3785 if (!u32p) { 3786 return 0; 3787 } 3788 3789 u32p += env->pmsav7.rnr[M_REG_NS]; 3790 return *u32p; 3791 } 3792 3793 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri, 3794 uint64_t value) 3795 { 3796 ARMCPU *cpu = env_archcpu(env); 3797 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 3798 3799 if (!u32p) { 3800 return; 3801 } 3802 3803 u32p += env->pmsav7.rnr[M_REG_NS]; 3804 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! 
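     * DRBAR/DRSR/DRACR are banked by region: raw_ptr() gives the base of
     * the per-region array and pmsav7.rnr (the RGNR value) selects which
     * entry is updated, so any TLB entries derived from the old settings
     * of that region must be dropped before the new value takes effect.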
*/ 3805 *u32p = value; 3806 } 3807 3808 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3809 uint64_t value) 3810 { 3811 ARMCPU *cpu = env_archcpu(env); 3812 uint32_t nrgs = cpu->pmsav7_dregion; 3813 3814 if (value >= nrgs) { 3815 qemu_log_mask(LOG_GUEST_ERROR, 3816 "PMSAv7 RGNR write >= # supported regions, %" PRIu32 3817 " > %" PRIu32 "\n", (uint32_t)value, nrgs); 3818 return; 3819 } 3820 3821 raw_write(env, ri, value); 3822 } 3823 3824 static const ARMCPRegInfo pmsav7_cp_reginfo[] = { 3825 /* Reset for all these registers is handled in arm_cpu_reset(), 3826 * because the PMSAv7 is also used by M-profile CPUs, which do 3827 * not register cpregs but still need the state to be reset. 3828 */ 3829 { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0, 3830 .access = PL1_RW, .type = ARM_CP_NO_RAW, 3831 .fieldoffset = offsetof(CPUARMState, pmsav7.drbar), 3832 .readfn = pmsav7_read, .writefn = pmsav7_write, 3833 .resetfn = arm_cp_reset_ignore }, 3834 { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2, 3835 .access = PL1_RW, .type = ARM_CP_NO_RAW, 3836 .fieldoffset = offsetof(CPUARMState, pmsav7.drsr), 3837 .readfn = pmsav7_read, .writefn = pmsav7_write, 3838 .resetfn = arm_cp_reset_ignore }, 3839 { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4, 3840 .access = PL1_RW, .type = ARM_CP_NO_RAW, 3841 .fieldoffset = offsetof(CPUARMState, pmsav7.dracr), 3842 .readfn = pmsav7_read, .writefn = pmsav7_write, 3843 .resetfn = arm_cp_reset_ignore }, 3844 { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0, 3845 .access = PL1_RW, 3846 .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]), 3847 .writefn = pmsav7_rgnr_write, 3848 .resetfn = arm_cp_reset_ignore }, 3849 REGINFO_SENTINEL 3850 }; 3851 3852 static const ARMCPRegInfo pmsav5_cp_reginfo[] = { 3853 { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 3854 .access = PL1_RW, .type = ARM_CP_ALIAS, 3855 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 3856 .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, }, 3857 { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 3858 .access = PL1_RW, .type = ARM_CP_ALIAS, 3859 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 3860 .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, }, 3861 { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2, 3862 .access = PL1_RW, 3863 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 3864 .resetvalue = 0, }, 3865 { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3, 3866 .access = PL1_RW, 3867 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 3868 .resetvalue = 0, }, 3869 { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 3870 .access = PL1_RW, 3871 .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, }, 3872 { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1, 3873 .access = PL1_RW, 3874 .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, }, 3875 /* Protection region base and size registers */ 3876 { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, 3877 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3878 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) }, 3879 { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0, 3880 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3881 .fieldoffset = offsetof(CPUARMState, 
                                       cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};

static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written.
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}

static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    TCR *tcr = raw_ptr(env, ri);

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(CPU(cpu));
    }
    /* Preserve the high half of TCR_EL1, set via TTBCR2. */
    value = deposit64(tcr->raw_tcr, 0, 32, value);
    vmsa_ttbcr_raw_write(env, ri, value);
}

static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    TCR *tcr = raw_ptr(env, ri);

    /* Reset both the TCR as well as the masks corresponding to the bank of
     * the TCR being reset.
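     * These are the short-descriptor values for TTBCR.N == 0: mask == 0
     * means every address is translated via TTBR0, and a base_mask of
     * 0xffffc000 corresponds to the maximum 16KB translation table.
     * For comparison, a guest write of N == 2 via vmsa_ttbcr_raw_write()
     * would give mask == 0xc0000000 (VAs from 0x40000000 up use TTBR1)
     * and base_mask == 0xfffff000 (a 4KB TTBR0 table).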
3959 */ 3960 tcr->raw_tcr = 0; 3961 tcr->mask = 0; 3962 tcr->base_mask = 0xffffc000u; 3963 } 3964 3965 static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri, 3966 uint64_t value) 3967 { 3968 ARMCPU *cpu = env_archcpu(env); 3969 TCR *tcr = raw_ptr(env, ri); 3970 3971 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */ 3972 tlb_flush(CPU(cpu)); 3973 tcr->raw_tcr = value; 3974 } 3975 3976 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3977 uint64_t value) 3978 { 3979 /* If the ASID changes (with a 64-bit write), we must flush the TLB. */ 3980 if (cpreg_field_is_64bit(ri) && 3981 extract64(raw_read(env, ri) ^ value, 48, 16) != 0) { 3982 ARMCPU *cpu = env_archcpu(env); 3983 tlb_flush(CPU(cpu)); 3984 } 3985 raw_write(env, ri, value); 3986 } 3987 3988 static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, 3989 uint64_t value) 3990 { 3991 /* 3992 * If we are running with E2&0 regime, then an ASID is active. 3993 * Flush if that might be changing. Note we're not checking 3994 * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that 3995 * holds the active ASID, only checking the field that might. 3996 */ 3997 if (extract64(raw_read(env, ri) ^ value, 48, 16) && 3998 (arm_hcr_el2_eff(env) & HCR_E2H)) { 3999 tlb_flush_by_mmuidx(env_cpu(env), 4000 ARMMMUIdxBit_E20_2 | 4001 ARMMMUIdxBit_E20_2_PAN | 4002 ARMMMUIdxBit_E20_0); 4003 } 4004 raw_write(env, ri, value); 4005 } 4006 4007 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4008 uint64_t value) 4009 { 4010 ARMCPU *cpu = env_archcpu(env); 4011 CPUState *cs = CPU(cpu); 4012 4013 /* 4014 * A change in VMID to the stage2 page table (Stage2) invalidates 4015 * the combined stage 1&2 tlbs (EL10_1 and EL10_0). 
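     * Only the VMID field in the upper bits of VTTBR actually matters for
     * TLB tagging, but for simplicity we flush on any change to the
     * register value; at worst this is an unnecessary flush.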
4016 */ 4017 if (raw_read(env, ri) != value) { 4018 tlb_flush_by_mmuidx(cs, 4019 ARMMMUIdxBit_E10_1 | 4020 ARMMMUIdxBit_E10_1_PAN | 4021 ARMMMUIdxBit_E10_0); 4022 raw_write(env, ri, value); 4023 } 4024 } 4025 4026 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = { 4027 { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 4028 .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS, 4029 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s), 4030 offsetoflow32(CPUARMState, cp15.dfsr_ns) }, }, 4031 { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 4032 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, 4033 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s), 4034 offsetoflow32(CPUARMState, cp15.ifsr_ns) } }, 4035 { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0, 4036 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, 4037 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s), 4038 offsetof(CPUARMState, cp15.dfar_ns) } }, 4039 { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64, 4040 .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0, 4041 .access = PL1_RW, .accessfn = access_tvm_trvm, 4042 .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]), 4043 .resetvalue = 0, }, 4044 REGINFO_SENTINEL 4045 }; 4046 4047 static const ARMCPRegInfo vmsa_cp_reginfo[] = { 4048 { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64, 4049 .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0, 4050 .access = PL1_RW, .accessfn = access_tvm_trvm, 4051 .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, }, 4052 { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH, 4053 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0, 4054 .access = PL1_RW, .accessfn = access_tvm_trvm, 4055 .writefn = vmsa_ttbr_write, .resetvalue = 0, 4056 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 4057 offsetof(CPUARMState, cp15.ttbr0_ns) } }, 4058 { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH, 4059 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1, 4060 .access = PL1_RW, .accessfn = access_tvm_trvm, 4061 .writefn = vmsa_ttbr_write, .resetvalue = 0, 4062 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 4063 offsetof(CPUARMState, cp15.ttbr1_ns) } }, 4064 { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64, 4065 .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 4066 .access = PL1_RW, .accessfn = access_tvm_trvm, 4067 .writefn = vmsa_tcr_el12_write, 4068 .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write, 4069 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) }, 4070 { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 4071 .access = PL1_RW, .accessfn = access_tvm_trvm, 4072 .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write, 4073 .raw_writefn = vmsa_ttbcr_raw_write, 4074 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]), 4075 offsetoflow32(CPUARMState, cp15.tcr_el[1])} }, 4076 REGINFO_SENTINEL 4077 }; 4078 4079 /* Note that unlike TTBCR, writing to TTBCR2 does not require flushing 4080 * qemu tlbs nor adjusting cached masks. 
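 * TTBCR2 (accessed as "mcr p15, 0, Rt, c2, c0, 3") is simply an alias of
 * the high half of TCR_EL1, via the offsetofhigh32() bank offsets below;
 * none of the fields held there feed the short-descriptor mask/base_mask
 * values cached by vmsa_ttbcr_raw_write(), so a plain field write is
 * sufficient.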
4081 */ 4082 static const ARMCPRegInfo ttbcr2_reginfo = { 4083 .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3, 4084 .access = PL1_RW, .accessfn = access_tvm_trvm, 4085 .type = ARM_CP_ALIAS, 4086 .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]), 4087 offsetofhigh32(CPUARMState, cp15.tcr_el[1]) }, 4088 }; 4089 4090 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri, 4091 uint64_t value) 4092 { 4093 env->cp15.c15_ticonfig = value & 0xe7; 4094 /* The OS_TYPE bit in this register changes the reported CPUID! */ 4095 env->cp15.c0_cpuid = (value & (1 << 5)) ? 4096 ARM_CPUID_TI915T : ARM_CPUID_TI925T; 4097 } 4098 4099 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri, 4100 uint64_t value) 4101 { 4102 env->cp15.c15_threadid = value & 0xffff; 4103 } 4104 4105 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri, 4106 uint64_t value) 4107 { 4108 /* Wait-for-interrupt (deprecated) */ 4109 cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT); 4110 } 4111 4112 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri, 4113 uint64_t value) 4114 { 4115 /* On OMAP there are registers indicating the max/min index of dcache lines 4116 * containing a dirty line; cache flush operations have to reset these. 4117 */ 4118 env->cp15.c15_i_max = 0x000; 4119 env->cp15.c15_i_min = 0xff0; 4120 } 4121 4122 static const ARMCPRegInfo omap_cp_reginfo[] = { 4123 { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY, 4124 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE, 4125 .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]), 4126 .resetvalue = 0, }, 4127 { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0, 4128 .access = PL1_RW, .type = ARM_CP_NOP }, 4129 { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, 4130 .access = PL1_RW, 4131 .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0, 4132 .writefn = omap_ticonfig_write }, 4133 { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0, 4134 .access = PL1_RW, 4135 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, }, 4136 { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0, 4137 .access = PL1_RW, .resetvalue = 0xff0, 4138 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) }, 4139 { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0, 4140 .access = PL1_RW, 4141 .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0, 4142 .writefn = omap_threadid_write }, 4143 { .name = "TI925T_STATUS", .cp = 15, .crn = 15, 4144 .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 4145 .type = ARM_CP_NO_RAW, 4146 .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, }, 4147 /* TODO: Peripheral port remap register: 4148 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller 4149 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff), 4150 * when MMU is off. 
4151 */ 4152 { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, 4153 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, 4154 .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW, 4155 .writefn = omap_cachemaint_write }, 4156 { .name = "C9", .cp = 15, .crn = 9, 4157 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, 4158 .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 }, 4159 REGINFO_SENTINEL 4160 }; 4161 4162 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri, 4163 uint64_t value) 4164 { 4165 env->cp15.c15_cpar = value & 0x3fff; 4166 } 4167 4168 static const ARMCPRegInfo xscale_cp_reginfo[] = { 4169 { .name = "XSCALE_CPAR", 4170 .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 4171 .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0, 4172 .writefn = xscale_cpar_write, }, 4173 { .name = "XSCALE_AUXCR", 4174 .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, 4175 .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr), 4176 .resetvalue = 0, }, 4177 /* XScale specific cache-lockdown: since we have no cache we NOP these 4178 * and hope the guest does not really rely on cache behaviour. 4179 */ 4180 { .name = "XSCALE_LOCK_ICACHE_LINE", 4181 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0, 4182 .access = PL1_W, .type = ARM_CP_NOP }, 4183 { .name = "XSCALE_UNLOCK_ICACHE", 4184 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1, 4185 .access = PL1_W, .type = ARM_CP_NOP }, 4186 { .name = "XSCALE_DCACHE_LOCK", 4187 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0, 4188 .access = PL1_RW, .type = ARM_CP_NOP }, 4189 { .name = "XSCALE_UNLOCK_DCACHE", 4190 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1, 4191 .access = PL1_W, .type = ARM_CP_NOP }, 4192 REGINFO_SENTINEL 4193 }; 4194 4195 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = { 4196 /* RAZ/WI the whole crn=15 space, when we don't have a more specific 4197 * implementation of this implementation-defined space. 4198 * Ideally this should eventually disappear in favour of actually 4199 * implementing the correct behaviour for all cores. 
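     * The single entry below uses CP_ANY wildcards for crm, opc1 and opc2,
     * so it decodes every encoding with crn == 15; ARM_CP_CONST with a
     * resetvalue of 0 gives the RAZ/WI behaviour described above.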
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};

static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vpidr_el2;
    }
    return raw_read(env, ri);
}

static uint64_t mpidr_read_val(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t mpidr = cpu->mp_affinity;

    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, Cortex-R5).
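         * Architecturally, MPIDR bit 31 (M) reads as 1 whenever the MP
         * extensions are implemented and bit 30 (U) marks a uniprocessor
         * system, so such a core reports 0xc0000000 ORed into its
         * affinity value here.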
4282 */ 4283 if (cpu->mp_is_up) { 4284 mpidr |= (1u << 30); 4285 } 4286 } 4287 return mpidr; 4288 } 4289 4290 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 4291 { 4292 unsigned int cur_el = arm_current_el(env); 4293 bool secure = arm_is_secure(env); 4294 4295 if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) { 4296 return env->cp15.vmpidr_el2; 4297 } 4298 return mpidr_read_val(env); 4299 } 4300 4301 static const ARMCPRegInfo lpae_cp_reginfo[] = { 4302 /* NOP AMAIR0/1 */ 4303 { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH, 4304 .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0, 4305 .access = PL1_RW, .accessfn = access_tvm_trvm, 4306 .type = ARM_CP_CONST, .resetvalue = 0 }, 4307 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */ 4308 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1, 4309 .access = PL1_RW, .accessfn = access_tvm_trvm, 4310 .type = ARM_CP_CONST, .resetvalue = 0 }, 4311 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0, 4312 .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0, 4313 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s), 4314 offsetof(CPUARMState, cp15.par_ns)} }, 4315 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0, 4316 .access = PL1_RW, .accessfn = access_tvm_trvm, 4317 .type = ARM_CP_64BIT | ARM_CP_ALIAS, 4318 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 4319 offsetof(CPUARMState, cp15.ttbr0_ns) }, 4320 .writefn = vmsa_ttbr_write, }, 4321 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1, 4322 .access = PL1_RW, .accessfn = access_tvm_trvm, 4323 .type = ARM_CP_64BIT | ARM_CP_ALIAS, 4324 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 4325 offsetof(CPUARMState, cp15.ttbr1_ns) }, 4326 .writefn = vmsa_ttbr_write, }, 4327 REGINFO_SENTINEL 4328 }; 4329 4330 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri) 4331 { 4332 return vfp_get_fpcr(env); 4333 } 4334 4335 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4336 uint64_t value) 4337 { 4338 vfp_set_fpcr(env, value); 4339 } 4340 4341 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri) 4342 { 4343 return vfp_get_fpsr(env); 4344 } 4345 4346 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4347 uint64_t value) 4348 { 4349 vfp_set_fpsr(env, value); 4350 } 4351 4352 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri, 4353 bool isread) 4354 { 4355 if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) { 4356 return CP_ACCESS_TRAP; 4357 } 4358 return CP_ACCESS_OK; 4359 } 4360 4361 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri, 4362 uint64_t value) 4363 { 4364 env->daif = value & PSTATE_DAIF; 4365 } 4366 4367 static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri) 4368 { 4369 return env->pstate & PSTATE_PAN; 4370 } 4371 4372 static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri, 4373 uint64_t value) 4374 { 4375 env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN); 4376 } 4377 4378 static const ARMCPRegInfo pan_reginfo = { 4379 .name = "PAN", .state = ARM_CP_STATE_AA64, 4380 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3, 4381 .type = ARM_CP_NO_RAW, .access = PL1_RW, 4382 .readfn = aa64_pan_read, .writefn = aa64_pan_write 4383 }; 4384 4385 static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri) 4386 { 4387 return env->pstate & PSTATE_UAO; 4388 } 4389 4390 static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri, 4391 
uint64_t value) 4392 { 4393 env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO); 4394 } 4395 4396 static const ARMCPRegInfo uao_reginfo = { 4397 .name = "UAO", .state = ARM_CP_STATE_AA64, 4398 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4, 4399 .type = ARM_CP_NO_RAW, .access = PL1_RW, 4400 .readfn = aa64_uao_read, .writefn = aa64_uao_write 4401 }; 4402 4403 static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env, 4404 const ARMCPRegInfo *ri, 4405 bool isread) 4406 { 4407 /* Cache invalidate/clean to Point of Coherency or Persistence... */ 4408 switch (arm_current_el(env)) { 4409 case 0: 4410 /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */ 4411 if (!(arm_sctlr(env, 0) & SCTLR_UCI)) { 4412 return CP_ACCESS_TRAP; 4413 } 4414 /* fall through */ 4415 case 1: 4416 /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set. */ 4417 if (arm_hcr_el2_eff(env) & HCR_TPCP) { 4418 return CP_ACCESS_TRAP_EL2; 4419 } 4420 break; 4421 } 4422 return CP_ACCESS_OK; 4423 } 4424 4425 static CPAccessResult aa64_cacheop_pou_access(CPUARMState *env, 4426 const ARMCPRegInfo *ri, 4427 bool isread) 4428 { 4429 /* Cache invalidate/clean to Point of Unification... */ 4430 switch (arm_current_el(env)) { 4431 case 0: 4432 /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */ 4433 if (!(arm_sctlr(env, 0) & SCTLR_UCI)) { 4434 return CP_ACCESS_TRAP; 4435 } 4436 /* fall through */ 4437 case 1: 4438 /* ... EL1 must trap to EL2 if HCR_EL2.TPU is set. */ 4439 if (arm_hcr_el2_eff(env) & HCR_TPU) { 4440 return CP_ACCESS_TRAP_EL2; 4441 } 4442 break; 4443 } 4444 return CP_ACCESS_OK; 4445 } 4446 4447 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions 4448 * Page D4-1736 (DDI0487A.b) 4449 */ 4450 4451 static int vae1_tlbmask(CPUARMState *env) 4452 { 4453 /* Since we exclude secure first, we may read HCR_EL2 directly. */ 4454 if (arm_is_secure_below_el3(env)) { 4455 return ARMMMUIdxBit_SE10_1 | 4456 ARMMMUIdxBit_SE10_1_PAN | 4457 ARMMMUIdxBit_SE10_0; 4458 } else if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE)) 4459 == (HCR_E2H | HCR_TGE)) { 4460 return ARMMMUIdxBit_E20_2 | 4461 ARMMMUIdxBit_E20_2_PAN | 4462 ARMMMUIdxBit_E20_0; 4463 } else { 4464 return ARMMMUIdxBit_E10_1 | 4465 ARMMMUIdxBit_E10_1_PAN | 4466 ARMMMUIdxBit_E10_0; 4467 } 4468 } 4469 4470 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4471 uint64_t value) 4472 { 4473 CPUState *cs = env_cpu(env); 4474 int mask = vae1_tlbmask(env); 4475 4476 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); 4477 } 4478 4479 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 4480 uint64_t value) 4481 { 4482 CPUState *cs = env_cpu(env); 4483 int mask = vae1_tlbmask(env); 4484 4485 if (tlb_force_broadcast(env)) { 4486 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); 4487 } else { 4488 tlb_flush_by_mmuidx(cs, mask); 4489 } 4490 } 4491 4492 static int alle1_tlbmask(CPUARMState *env) 4493 { 4494 /* 4495 * Note that the 'ALL' scope must invalidate both stage 1 and 4496 * stage 2 translations, whereas most other scopes only invalidate 4497 * stage 1 translations. 
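     * In QEMU the EL1&0 mmu_idx TLB entries are built from the combined
     * stage 1 + stage 2 walk, so flushing the E10_* (or SE10_*) indexes
     * below also discards any cached stage 2 information.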
4498 */ 4499 if (arm_is_secure_below_el3(env)) { 4500 return ARMMMUIdxBit_SE10_1 | 4501 ARMMMUIdxBit_SE10_1_PAN | 4502 ARMMMUIdxBit_SE10_0; 4503 } else { 4504 return ARMMMUIdxBit_E10_1 | 4505 ARMMMUIdxBit_E10_1_PAN | 4506 ARMMMUIdxBit_E10_0; 4507 } 4508 } 4509 4510 static int e2_tlbmask(CPUARMState *env) 4511 { 4512 /* TODO: ARMv8.4-SecEL2 */ 4513 return ARMMMUIdxBit_E20_0 | 4514 ARMMMUIdxBit_E20_2 | 4515 ARMMMUIdxBit_E20_2_PAN | 4516 ARMMMUIdxBit_E2; 4517 } 4518 4519 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 4520 uint64_t value) 4521 { 4522 CPUState *cs = env_cpu(env); 4523 int mask = alle1_tlbmask(env); 4524 4525 tlb_flush_by_mmuidx(cs, mask); 4526 } 4527 4528 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri, 4529 uint64_t value) 4530 { 4531 CPUState *cs = env_cpu(env); 4532 int mask = e2_tlbmask(env); 4533 4534 tlb_flush_by_mmuidx(cs, mask); 4535 } 4536 4537 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, 4538 uint64_t value) 4539 { 4540 ARMCPU *cpu = env_archcpu(env); 4541 CPUState *cs = CPU(cpu); 4542 4543 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3); 4544 } 4545 4546 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4547 uint64_t value) 4548 { 4549 CPUState *cs = env_cpu(env); 4550 int mask = alle1_tlbmask(env); 4551 4552 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); 4553 } 4554 4555 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4556 uint64_t value) 4557 { 4558 CPUState *cs = env_cpu(env); 4559 int mask = e2_tlbmask(env); 4560 4561 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); 4562 } 4563 4564 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4565 uint64_t value) 4566 { 4567 CPUState *cs = env_cpu(env); 4568 4569 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3); 4570 } 4571 4572 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, 4573 uint64_t value) 4574 { 4575 /* Invalidate by VA, EL2 4576 * Currently handles both VAE2 and VALE2, since we don't support 4577 * flush-last-level-only. 4578 */ 4579 CPUState *cs = env_cpu(env); 4580 int mask = e2_tlbmask(env); 4581 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4582 4583 tlb_flush_page_by_mmuidx(cs, pageaddr, mask); 4584 } 4585 4586 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri, 4587 uint64_t value) 4588 { 4589 /* Invalidate by VA, EL3 4590 * Currently handles both VAE3 and VALE3, since we don't support 4591 * flush-last-level-only. 4592 */ 4593 ARMCPU *cpu = env_archcpu(env); 4594 CPUState *cs = CPU(cpu); 4595 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4596 4597 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3); 4598 } 4599 4600 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4601 uint64_t value) 4602 { 4603 CPUState *cs = env_cpu(env); 4604 int mask = vae1_tlbmask(env); 4605 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4606 4607 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask); 4608 } 4609 4610 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, 4611 uint64_t value) 4612 { 4613 /* Invalidate by VA, EL1&0 (AArch64 version). 4614 * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1, 4615 * since we don't support flush-for-specific-ASID-only or 4616 * flush-last-level-only. 
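     * The Xt argument carries VA[55:12] in bits [43:0], so shifting it
     * left by 12 and sign-extending from bit 55 (the sextract64() below)
     * recovers the full page-aligned virtual address to invalidate.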
4617 */ 4618 CPUState *cs = env_cpu(env); 4619 int mask = vae1_tlbmask(env); 4620 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4621 4622 if (tlb_force_broadcast(env)) { 4623 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask); 4624 } else { 4625 tlb_flush_page_by_mmuidx(cs, pageaddr, mask); 4626 } 4627 } 4628 4629 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4630 uint64_t value) 4631 { 4632 CPUState *cs = env_cpu(env); 4633 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4634 4635 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 4636 ARMMMUIdxBit_E2); 4637 } 4638 4639 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4640 uint64_t value) 4641 { 4642 CPUState *cs = env_cpu(env); 4643 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4644 4645 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 4646 ARMMMUIdxBit_SE3); 4647 } 4648 4649 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri, 4650 bool isread) 4651 { 4652 int cur_el = arm_current_el(env); 4653 4654 if (cur_el < 2) { 4655 uint64_t hcr = arm_hcr_el2_eff(env); 4656 4657 if (cur_el == 0) { 4658 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 4659 if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) { 4660 return CP_ACCESS_TRAP_EL2; 4661 } 4662 } else { 4663 if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) { 4664 return CP_ACCESS_TRAP; 4665 } 4666 if (hcr & HCR_TDZ) { 4667 return CP_ACCESS_TRAP_EL2; 4668 } 4669 } 4670 } else if (hcr & HCR_TDZ) { 4671 return CP_ACCESS_TRAP_EL2; 4672 } 4673 } 4674 return CP_ACCESS_OK; 4675 } 4676 4677 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri) 4678 { 4679 ARMCPU *cpu = env_archcpu(env); 4680 int dzp_bit = 1 << 4; 4681 4682 /* DZP indicates whether DC ZVA access is allowed */ 4683 if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) { 4684 dzp_bit = 0; 4685 } 4686 return cpu->dcz_blocksize | dzp_bit; 4687 } 4688 4689 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, 4690 bool isread) 4691 { 4692 if (!(env->pstate & PSTATE_SP)) { 4693 /* Access to SP_EL0 is undefined if it's being used as 4694 * the stack pointer. 4695 */ 4696 return CP_ACCESS_TRAP_UNCATEGORIZED; 4697 } 4698 return CP_ACCESS_OK; 4699 } 4700 4701 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri) 4702 { 4703 return env->pstate & PSTATE_SP; 4704 } 4705 4706 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val) 4707 { 4708 update_spsel(env, val); 4709 } 4710 4711 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4712 uint64_t value) 4713 { 4714 ARMCPU *cpu = env_archcpu(env); 4715 4716 if (raw_read(env, ri) == value) { 4717 /* Skip the TLB flush if nothing actually changed; Linux likes 4718 * to do a lot of pointless SCTLR writes. 4719 */ 4720 return; 4721 } 4722 4723 if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) { 4724 /* M bit is RAZ/WI for PMSA with no MPU implemented */ 4725 value &= ~SCTLR_M; 4726 } 4727 4728 raw_write(env, ri, value); 4729 /* ??? Lots of these bits are not implemented. */ 4730 /* This may enable/disable the MMU, so do a TLB flush. */ 4731 tlb_flush(CPU(cpu)); 4732 4733 if (ri->type & ARM_CP_SUPPRESS_TB_END) { 4734 /* 4735 * Normally we would always end the TB on an SCTLR write; see the 4736 * comment in ARMCPRegInfo sctlr initialization below for why Xscale 4737 * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild 4738 * of hflags from the translator, so do it here. 
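         * The cached hflags encode state derived from SCTLR (for example
         * the current data endianness), so they must be recomputed before
         * the next TB executes.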
4739 */ 4740 arm_rebuild_hflags(env); 4741 } 4742 } 4743 4744 static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri, 4745 bool isread) 4746 { 4747 if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) { 4748 return CP_ACCESS_TRAP_FP_EL2; 4749 } 4750 if (env->cp15.cptr_el[3] & CPTR_TFP) { 4751 return CP_ACCESS_TRAP_FP_EL3; 4752 } 4753 return CP_ACCESS_OK; 4754 } 4755 4756 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4757 uint64_t value) 4758 { 4759 env->cp15.mdcr_el3 = value & SDCR_VALID_MASK; 4760 } 4761 4762 static const ARMCPRegInfo v8_cp_reginfo[] = { 4763 /* Minimal set of EL0-visible registers. This will need to be expanded 4764 * significantly for system emulation of AArch64 CPUs. 4765 */ 4766 { .name = "NZCV", .state = ARM_CP_STATE_AA64, 4767 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2, 4768 .access = PL0_RW, .type = ARM_CP_NZCV }, 4769 { .name = "DAIF", .state = ARM_CP_STATE_AA64, 4770 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2, 4771 .type = ARM_CP_NO_RAW, 4772 .access = PL0_RW, .accessfn = aa64_daif_access, 4773 .fieldoffset = offsetof(CPUARMState, daif), 4774 .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore }, 4775 { .name = "FPCR", .state = ARM_CP_STATE_AA64, 4776 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4, 4777 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, 4778 .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write }, 4779 { .name = "FPSR", .state = ARM_CP_STATE_AA64, 4780 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4, 4781 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, 4782 .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write }, 4783 { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64, 4784 .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0, 4785 .access = PL0_R, .type = ARM_CP_NO_RAW, 4786 .readfn = aa64_dczid_read }, 4787 { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64, 4788 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1, 4789 .access = PL0_W, .type = ARM_CP_DC_ZVA, 4790 #ifndef CONFIG_USER_ONLY 4791 /* Avoid overhead of an access check that always passes in user-mode */ 4792 .accessfn = aa64_zva_access, 4793 #endif 4794 }, 4795 { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64, 4796 .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2, 4797 .access = PL1_R, .type = ARM_CP_CURRENTEL }, 4798 /* Cache ops: all NOPs since we don't emulate caches */ 4799 { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64, 4800 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 4801 .access = PL1_W, .type = ARM_CP_NOP, 4802 .accessfn = aa64_cacheop_pou_access }, 4803 { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64, 4804 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 4805 .access = PL1_W, .type = ARM_CP_NOP, 4806 .accessfn = aa64_cacheop_pou_access }, 4807 { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64, 4808 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1, 4809 .access = PL0_W, .type = ARM_CP_NOP, 4810 .accessfn = aa64_cacheop_pou_access }, 4811 { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64, 4812 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 4813 .access = PL1_W, .accessfn = aa64_cacheop_poc_access, 4814 .type = ARM_CP_NOP }, 4815 { .name = "DC_ISW", .state = ARM_CP_STATE_AA64, 4816 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 4817 .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP }, 4818 { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64, 4819 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1, 4820 .access = PL0_W, .type = 
ARM_CP_NOP, 4821 .accessfn = aa64_cacheop_poc_access }, 4822 { .name = "DC_CSW", .state = ARM_CP_STATE_AA64, 4823 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, 4824 .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP }, 4825 { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64, 4826 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1, 4827 .access = PL0_W, .type = ARM_CP_NOP, 4828 .accessfn = aa64_cacheop_pou_access }, 4829 { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64, 4830 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1, 4831 .access = PL0_W, .type = ARM_CP_NOP, 4832 .accessfn = aa64_cacheop_poc_access }, 4833 { .name = "DC_CISW", .state = ARM_CP_STATE_AA64, 4834 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 4835 .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP }, 4836 /* TLBI operations */ 4837 { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64, 4838 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, 4839 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4840 .writefn = tlbi_aa64_vmalle1is_write }, 4841 { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64, 4842 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, 4843 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4844 .writefn = tlbi_aa64_vae1is_write }, 4845 { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64, 4846 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, 4847 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4848 .writefn = tlbi_aa64_vmalle1is_write }, 4849 { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64, 4850 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, 4851 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4852 .writefn = tlbi_aa64_vae1is_write }, 4853 { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64, 4854 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, 4855 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4856 .writefn = tlbi_aa64_vae1is_write }, 4857 { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64, 4858 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, 4859 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4860 .writefn = tlbi_aa64_vae1is_write }, 4861 { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64, 4862 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, 4863 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4864 .writefn = tlbi_aa64_vmalle1_write }, 4865 { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64, 4866 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, 4867 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4868 .writefn = tlbi_aa64_vae1_write }, 4869 { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64, 4870 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, 4871 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4872 .writefn = tlbi_aa64_vmalle1_write }, 4873 { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64, 4874 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, 4875 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4876 .writefn = tlbi_aa64_vae1_write }, 4877 { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64, 4878 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, 4879 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4880 .writefn = tlbi_aa64_vae1_write }, 4881 { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64, 4882 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, 4883 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4884 
.writefn = tlbi_aa64_vae1_write }, 4885 { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64, 4886 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, 4887 .access = PL2_W, .type = ARM_CP_NOP }, 4888 { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64, 4889 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, 4890 .access = PL2_W, .type = ARM_CP_NOP }, 4891 { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64, 4892 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, 4893 .access = PL2_W, .type = ARM_CP_NO_RAW, 4894 .writefn = tlbi_aa64_alle1is_write }, 4895 { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64, 4896 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6, 4897 .access = PL2_W, .type = ARM_CP_NO_RAW, 4898 .writefn = tlbi_aa64_alle1is_write }, 4899 { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64, 4900 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, 4901 .access = PL2_W, .type = ARM_CP_NOP }, 4902 { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64, 4903 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, 4904 .access = PL2_W, .type = ARM_CP_NOP }, 4905 { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64, 4906 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, 4907 .access = PL2_W, .type = ARM_CP_NO_RAW, 4908 .writefn = tlbi_aa64_alle1_write }, 4909 { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64, 4910 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6, 4911 .access = PL2_W, .type = ARM_CP_NO_RAW, 4912 .writefn = tlbi_aa64_alle1is_write }, 4913 #ifndef CONFIG_USER_ONLY 4914 /* 64 bit address translation operations */ 4915 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, 4916 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0, 4917 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4918 .writefn = ats_write64 }, 4919 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, 4920 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1, 4921 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4922 .writefn = ats_write64 }, 4923 { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64, 4924 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2, 4925 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4926 .writefn = ats_write64 }, 4927 { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64, 4928 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3, 4929 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4930 .writefn = ats_write64 }, 4931 { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64, 4932 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4, 4933 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4934 .writefn = ats_write64 }, 4935 { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64, 4936 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5, 4937 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4938 .writefn = ats_write64 }, 4939 { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64, 4940 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6, 4941 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4942 .writefn = ats_write64 }, 4943 { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64, 4944 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7, 4945 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4946 .writefn = ats_write64 }, 4947 /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */ 4948 { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64, 4949 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0, 4950 .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4951 .writefn = ats_write64 }, 
4952 { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64, 4953 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1, 4954 .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4955 .writefn = ats_write64 }, 4956 { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64, 4957 .type = ARM_CP_ALIAS, 4958 .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0, 4959 .access = PL1_RW, .resetvalue = 0, 4960 .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]), 4961 .writefn = par_write }, 4962 #endif 4963 /* TLB invalidate last level of translation table walk */ 4964 { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, 4965 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 4966 .writefn = tlbimva_is_write }, 4967 { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, 4968 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 4969 .writefn = tlbimvaa_is_write }, 4970 { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, 4971 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 4972 .writefn = tlbimva_write }, 4973 { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, 4974 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 4975 .writefn = tlbimvaa_write }, 4976 { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, 4977 .type = ARM_CP_NO_RAW, .access = PL2_W, 4978 .writefn = tlbimva_hyp_write }, 4979 { .name = "TLBIMVALHIS", 4980 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, 4981 .type = ARM_CP_NO_RAW, .access = PL2_W, 4982 .writefn = tlbimva_hyp_is_write }, 4983 { .name = "TLBIIPAS2", 4984 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, 4985 .type = ARM_CP_NOP, .access = PL2_W }, 4986 { .name = "TLBIIPAS2IS", 4987 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, 4988 .type = ARM_CP_NOP, .access = PL2_W }, 4989 { .name = "TLBIIPAS2L", 4990 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, 4991 .type = ARM_CP_NOP, .access = PL2_W }, 4992 { .name = "TLBIIPAS2LIS", 4993 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, 4994 .type = ARM_CP_NOP, .access = PL2_W }, 4995 /* 32 bit cache operations */ 4996 { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 4997 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, 4998 { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6, 4999 .type = ARM_CP_NOP, .access = PL1_W }, 5000 { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 5001 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, 5002 { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1, 5003 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, 5004 { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6, 5005 .type = ARM_CP_NOP, .access = PL1_W }, 5006 { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7, 5007 .type = ARM_CP_NOP, .access = PL1_W }, 5008 { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 5009 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access }, 5010 { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 5011 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 5012 { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1, 5013 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access }, 5014 { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 
10, .opc2 = 2, 5015 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 5016 { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1, 5017 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, 5018 { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1, 5019 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access }, 5020 { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 5021 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 5022 /* MMU Domain access control / MPU write buffer control */ 5023 { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0, 5024 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, 5025 .writefn = dacr_write, .raw_writefn = raw_write, 5026 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), 5027 offsetoflow32(CPUARMState, cp15.dacr_ns) } }, 5028 { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64, 5029 .type = ARM_CP_ALIAS, 5030 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1, 5031 .access = PL1_RW, 5032 .fieldoffset = offsetof(CPUARMState, elr_el[1]) }, 5033 { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64, 5034 .type = ARM_CP_ALIAS, 5035 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0, 5036 .access = PL1_RW, 5037 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) }, 5038 /* We rely on the access checks not allowing the guest to write to the 5039 * state field when SPSel indicates that it's being used as the stack 5040 * pointer. 5041 */ 5042 { .name = "SP_EL0", .state = ARM_CP_STATE_AA64, 5043 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0, 5044 .access = PL1_RW, .accessfn = sp_el0_access, 5045 .type = ARM_CP_ALIAS, 5046 .fieldoffset = offsetof(CPUARMState, sp_el[0]) }, 5047 { .name = "SP_EL1", .state = ARM_CP_STATE_AA64, 5048 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0, 5049 .access = PL2_RW, .type = ARM_CP_ALIAS, 5050 .fieldoffset = offsetof(CPUARMState, sp_el[1]) }, 5051 { .name = "SPSel", .state = ARM_CP_STATE_AA64, 5052 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0, 5053 .type = ARM_CP_NO_RAW, 5054 .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write }, 5055 { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64, 5056 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0, 5057 .type = ARM_CP_ALIAS, 5058 .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]), 5059 .access = PL2_RW, .accessfn = fpexc32_access }, 5060 { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64, 5061 .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0, 5062 .access = PL2_RW, .resetvalue = 0, 5063 .writefn = dacr_write, .raw_writefn = raw_write, 5064 .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) }, 5065 { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64, 5066 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1, 5067 .access = PL2_RW, .resetvalue = 0, 5068 .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) }, 5069 { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64, 5070 .type = ARM_CP_ALIAS, 5071 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0, 5072 .access = PL2_RW, 5073 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) }, 5074 { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64, 5075 .type = ARM_CP_ALIAS, 5076 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1, 5077 .access = PL2_RW, 5078 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) }, 5079 { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64, 5080 .type = ARM_CP_ALIAS, 5081 .opc0 = 3, .opc1 = 4, .crn = 4, 
.crm = 3, .opc2 = 2, 5082 .access = PL2_RW, 5083 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) }, 5084 { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64, 5085 .type = ARM_CP_ALIAS, 5086 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3, 5087 .access = PL2_RW, 5088 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) }, 5089 { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64, 5090 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1, 5091 .resetvalue = 0, 5092 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) }, 5093 { .name = "SDCR", .type = ARM_CP_ALIAS, 5094 .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1, 5095 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 5096 .writefn = sdcr_write, 5097 .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) }, 5098 REGINFO_SENTINEL 5099 }; 5100 5101 /* Used to describe the behaviour of EL2 regs when EL2 does not exist. */ 5102 static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = { 5103 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, 5104 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, 5105 .access = PL2_RW, 5106 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore }, 5107 { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH, 5108 .type = ARM_CP_CONST, 5109 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 5110 .access = PL2_RW, 5111 .resetvalue = 0 }, 5112 { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH, 5113 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7, 5114 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5115 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, 5116 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, 5117 .access = PL2_RW, 5118 .type = ARM_CP_CONST, .resetvalue = 0 }, 5119 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, 5120 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, 5121 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5122 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, 5123 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, 5124 .access = PL2_RW, .type = ARM_CP_CONST, 5125 .resetvalue = 0 }, 5126 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, 5127 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, 5128 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5129 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, 5130 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, 5131 .access = PL2_RW, .type = ARM_CP_CONST, 5132 .resetvalue = 0 }, 5133 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, 5134 .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, 5135 .access = PL2_RW, .type = ARM_CP_CONST, 5136 .resetvalue = 0 }, 5137 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, 5138 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, 5139 .access = PL2_RW, .type = ARM_CP_CONST, 5140 .resetvalue = 0 }, 5141 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, 5142 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, 5143 .access = PL2_RW, .type = ARM_CP_CONST, 5144 .resetvalue = 0 }, 5145 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, 5146 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, 5147 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5148 { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH, 5149 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 5150 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 5151 .type = ARM_CP_CONST, .resetvalue = 0 }, 5152 { .name = "VTTBR", .state = ARM_CP_STATE_AA32, 5153 .cp = 15, .opc1 = 6, .crm = 2, 5154 .access = PL2_RW, .accessfn =
access_el3_aa32ns, 5155 .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, 5156 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, 5157 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, 5158 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5159 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, 5160 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, 5161 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5162 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, 5163 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, 5164 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5165 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, 5166 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 5167 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5168 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, 5169 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 5170 .resetvalue = 0 }, 5171 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, 5172 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, 5173 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5174 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, 5175 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, 5176 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5177 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, 5178 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 5179 .resetvalue = 0 }, 5180 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, 5181 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, 5182 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5183 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, 5184 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 5185 .resetvalue = 0 }, 5186 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 5187 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, 5188 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5189 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, 5190 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, 5191 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5192 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, 5193 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, 5194 .access = PL2_RW, .accessfn = access_tda, 5195 .type = ARM_CP_CONST, .resetvalue = 0 }, 5196 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH, 5197 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 5198 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 5199 .type = ARM_CP_CONST, .resetvalue = 0 }, 5200 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, 5201 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, 5202 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5203 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, 5204 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, 5205 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5206 { .name = "HIFAR", .state = ARM_CP_STATE_AA32, 5207 .type = ARM_CP_CONST, 5208 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, 5209 .access = PL2_RW, .resetvalue = 0 }, 5210 REGINFO_SENTINEL 5211 }; 5212 5213 /* Ditto, but for registers which exist in ARMv8 but not v7 */ 5214 static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = { 5215 { .name = "HCR2", .state = ARM_CP_STATE_AA32, 5216 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, 5217 .access = PL2_RW, 5218 .type = ARM_CP_CONST, .resetvalue = 0 }, 5219 REGINFO_SENTINEL 5220 }; 5221 5222 static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) 
5223 { 5224 ARMCPU *cpu = env_archcpu(env); 5225 5226 if (arm_feature(env, ARM_FEATURE_V8)) { 5227 valid_mask |= MAKE_64BIT_MASK(0, 34); /* ARMv8.0 */ 5228 } else { 5229 valid_mask |= MAKE_64BIT_MASK(0, 28); /* ARMv7VE */ 5230 } 5231 5232 if (arm_feature(env, ARM_FEATURE_EL3)) { 5233 valid_mask &= ~HCR_HCD; 5234 } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { 5235 /* Architecturally HCR.TSC is RES0 if EL3 is not implemented. 5236 * However, if we're using the SMC PSCI conduit then QEMU is 5237 * effectively acting like EL3 firmware and so the guest at 5238 * EL2 should retain the ability to prevent EL1 from being 5239 * able to make SMC calls into the ersatz firmware, so in 5240 * that case HCR.TSC should be read/write. 5241 */ 5242 valid_mask &= ~HCR_TSC; 5243 } 5244 5245 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 5246 if (cpu_isar_feature(aa64_vh, cpu)) { 5247 valid_mask |= HCR_E2H; 5248 } 5249 if (cpu_isar_feature(aa64_lor, cpu)) { 5250 valid_mask |= HCR_TLOR; 5251 } 5252 if (cpu_isar_feature(aa64_pauth, cpu)) { 5253 valid_mask |= HCR_API | HCR_APK; 5254 } 5255 } 5256 5257 /* Clear RES0 bits. */ 5258 value &= valid_mask; 5259 5260 /* These bits change the MMU setup: 5261 * HCR_VM enables stage 2 translation 5262 * HCR_PTW forbids certain page-table setups 5263 * HCR_DC Disables stage1 and enables stage2 translation 5264 */ 5265 if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) { 5266 tlb_flush(CPU(cpu)); 5267 } 5268 env->cp15.hcr_el2 = value; 5269 5270 /* 5271 * Updates to VI and VF require us to update the status of 5272 * virtual interrupts, which are the logical OR of these bits 5273 * and the state of the input lines from the GIC. (This requires 5274 * that we have the iothread lock, which is done by marking the 5275 * reginfo structs as ARM_CP_IO.) 5276 * Note that if a write to HCR pends a VIRQ or VFIQ it is never 5277 * possible for it to be taken immediately, because VIRQ and 5278 * VFIQ are masked unless running at EL0 or EL1, and HCR 5279 * can only be written at EL2. 5280 */ 5281 g_assert(qemu_mutex_iothread_locked()); 5282 arm_cpu_update_virq(cpu); 5283 arm_cpu_update_vfiq(cpu); 5284 } 5285 5286 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 5287 { 5288 do_hcr_write(env, value, 0); 5289 } 5290 5291 static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri, 5292 uint64_t value) 5293 { 5294 /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */ 5295 value = deposit64(env->cp15.hcr_el2, 32, 32, value); 5296 do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32)); 5297 } 5298 5299 static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri, 5300 uint64_t value) 5301 { 5302 /* Handle HCR write, i.e. write to low half of HCR_EL2 */ 5303 value = deposit64(env->cp15.hcr_el2, 0, 32, value); 5304 do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32)); 5305 } 5306 5307 /* 5308 * Return the effective value of HCR_EL2. 5309 * Bits that are not included here: 5310 * RW (read from SCR_EL3.RW as needed) 5311 */ 5312 uint64_t arm_hcr_el2_eff(CPUARMState *env) 5313 { 5314 uint64_t ret = env->cp15.hcr_el2; 5315 5316 if (arm_is_secure_below_el3(env)) { 5317 /* 5318 * "This register has no effect if EL2 is not enabled in the 5319 * current Security state". This is ARMv8.4-SecEL2 speak for 5320 * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1). 
5321 * 5322 * Prior to that, the language was "In an implementation that 5323 * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves 5324 * as if this field is 0 for all purposes other than a direct 5325 * read or write access of HCR_EL2". With lots of enumeration 5326 * on a per-field basis. In current QEMU, this condition 5327 * is arm_is_secure_below_el3. 5328 * 5329 * Since the v8.4 language applies to the entire register, and 5330 * appears to be backward compatible, use that. 5331 */ 5332 return 0; 5333 } 5334 5335 /* 5336 * For a cpu that supports both aarch64 and aarch32, we can set bits 5337 * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32. 5338 * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32. 5339 */ 5340 if (!arm_el_is_aa64(env, 2)) { 5341 uint64_t aa32_valid; 5342 5343 /* 5344 * These bits are up-to-date as of ARMv8.6. 5345 * For HCR, it's easiest to list just the 2 bits that are invalid. 5346 * For HCR2, list those that are valid. 5347 */ 5348 aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ); 5349 aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE | 5350 HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS); 5351 ret &= aa32_valid; 5352 } 5353 5354 if (ret & HCR_TGE) { 5355 /* These bits are up-to-date as of ARMv8.6. */ 5356 if (ret & HCR_E2H) { 5357 ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO | 5358 HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE | 5359 HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU | 5360 HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE | 5361 HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT | 5362 HCR_TTLBIS | HCR_TTLBOS | HCR_TID5); 5363 } else { 5364 ret |= HCR_FMO | HCR_IMO | HCR_AMO; 5365 } 5366 ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE | 5367 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR | 5368 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM | 5369 HCR_TLOR); 5370 } 5371 5372 return ret; 5373 } 5374 5375 static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, 5376 uint64_t value) 5377 { 5378 /* 5379 * For A-profile AArch32 EL3, if NSACR.CP10 5380 * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1. 5381 */ 5382 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 5383 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { 5384 value &= ~(0x3 << 10); 5385 value |= env->cp15.cptr_el[2] & (0x3 << 10); 5386 } 5387 env->cp15.cptr_el[2] = value; 5388 } 5389 5390 static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri) 5391 { 5392 /* 5393 * For A-profile AArch32 EL3, if NSACR.CP10 5394 * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
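* (TCP10 and TCP11 are bits [10] and [11] of HCPTR, which is what the 0x3 << 10 masks here and in cptr_el2_write cover.)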
5395 */ 5396 uint64_t value = env->cp15.cptr_el[2]; 5397 5398 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 5399 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { 5400 value |= 0x3 << 10; 5401 } 5402 return value; 5403 } 5404 5405 static const ARMCPRegInfo el2_cp_reginfo[] = { 5406 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64, 5407 .type = ARM_CP_IO, 5408 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 5409 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), 5410 .writefn = hcr_write }, 5411 { .name = "HCR", .state = ARM_CP_STATE_AA32, 5412 .type = ARM_CP_ALIAS | ARM_CP_IO, 5413 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 5414 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), 5415 .writefn = hcr_writelow }, 5416 { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH, 5417 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7, 5418 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5419 { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64, 5420 .type = ARM_CP_ALIAS, 5421 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1, 5422 .access = PL2_RW, 5423 .fieldoffset = offsetof(CPUARMState, elr_el[2]) }, 5424 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, 5425 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, 5426 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) }, 5427 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, 5428 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, 5429 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) }, 5430 { .name = "HIFAR", .state = ARM_CP_STATE_AA32, 5431 .type = ARM_CP_ALIAS, 5432 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, 5433 .access = PL2_RW, 5434 .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) }, 5435 { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64, 5436 .type = ARM_CP_ALIAS, 5437 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0, 5438 .access = PL2_RW, 5439 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) }, 5440 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, 5441 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, 5442 .access = PL2_RW, .writefn = vbar_write, 5443 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]), 5444 .resetvalue = 0 }, 5445 { .name = "SP_EL2", .state = ARM_CP_STATE_AA64, 5446 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0, 5447 .access = PL3_RW, .type = ARM_CP_ALIAS, 5448 .fieldoffset = offsetof(CPUARMState, sp_el[2]) }, 5449 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, 5450 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, 5451 .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0, 5452 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]), 5453 .readfn = cptr_el2_read, .writefn = cptr_el2_write }, 5454 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, 5455 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, 5456 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]), 5457 .resetvalue = 0 }, 5458 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, 5459 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, 5460 .access = PL2_RW, .type = ARM_CP_ALIAS, 5461 .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) }, 5462 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, 5463 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, 5464 .access = PL2_RW, .type = ARM_CP_CONST, 5465 .resetvalue = 0 }, 5466 /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */ 5467 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, 5468 .cp = 15, .opc1 = 4, .crn = 10, 
.crm = 3, .opc2 = 1, 5469 .access = PL2_RW, .type = ARM_CP_CONST, 5470 .resetvalue = 0 }, 5471 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, 5472 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, 5473 .access = PL2_RW, .type = ARM_CP_CONST, 5474 .resetvalue = 0 }, 5475 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, 5476 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, 5477 .access = PL2_RW, .type = ARM_CP_CONST, 5478 .resetvalue = 0 }, 5479 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, 5480 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, 5481 .access = PL2_RW, .writefn = vmsa_tcr_el12_write, 5482 /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */ 5483 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) }, 5484 { .name = "VTCR", .state = ARM_CP_STATE_AA32, 5485 .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 5486 .type = ARM_CP_ALIAS, 5487 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5488 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 5489 { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64, 5490 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 5491 .access = PL2_RW, 5492 /* no .writefn needed as this can't cause an ASID change; 5493 * no .raw_writefn or .resetfn needed as we never use mask/base_mask 5494 */ 5495 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 5496 { .name = "VTTBR", .state = ARM_CP_STATE_AA32, 5497 .cp = 15, .opc1 = 6, .crm = 2, 5498 .type = ARM_CP_64BIT | ARM_CP_ALIAS, 5499 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5500 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2), 5501 .writefn = vttbr_write }, 5502 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, 5503 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, 5504 .access = PL2_RW, .writefn = vttbr_write, 5505 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) }, 5506 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, 5507 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, 5508 .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write, 5509 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) }, 5510 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, 5511 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, 5512 .access = PL2_RW, .resetvalue = 0, 5513 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) }, 5514 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, 5515 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 5516 .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write, 5517 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 5518 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, 5519 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 5520 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 5521 { .name = "TLBIALLNSNH", 5522 .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, 5523 .type = ARM_CP_NO_RAW, .access = PL2_W, 5524 .writefn = tlbiall_nsnh_write }, 5525 { .name = "TLBIALLNSNHIS", 5526 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, 5527 .type = ARM_CP_NO_RAW, .access = PL2_W, 5528 .writefn = tlbiall_nsnh_is_write }, 5529 { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, 5530 .type = ARM_CP_NO_RAW, .access = PL2_W, 5531 .writefn = tlbiall_hyp_write }, 5532 { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, 5533 .type = ARM_CP_NO_RAW, .access = PL2_W, 5534 .writefn = tlbiall_hyp_is_write }, 5535 { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, 5536 .type = ARM_CP_NO_RAW, .access = PL2_W, 5537 .writefn 
= tlbimva_hyp_write }, 5538 { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, 5539 .type = ARM_CP_NO_RAW, .access = PL2_W, 5540 .writefn = tlbimva_hyp_is_write }, 5541 { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64, 5542 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, 5543 .type = ARM_CP_NO_RAW, .access = PL2_W, 5544 .writefn = tlbi_aa64_alle2_write }, 5545 { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64, 5546 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, 5547 .type = ARM_CP_NO_RAW, .access = PL2_W, 5548 .writefn = tlbi_aa64_vae2_write }, 5549 { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64, 5550 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, 5551 .access = PL2_W, .type = ARM_CP_NO_RAW, 5552 .writefn = tlbi_aa64_vae2_write }, 5553 { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64, 5554 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, 5555 .access = PL2_W, .type = ARM_CP_NO_RAW, 5556 .writefn = tlbi_aa64_alle2is_write }, 5557 { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64, 5558 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, 5559 .type = ARM_CP_NO_RAW, .access = PL2_W, 5560 .writefn = tlbi_aa64_vae2is_write }, 5561 { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64, 5562 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, 5563 .access = PL2_W, .type = ARM_CP_NO_RAW, 5564 .writefn = tlbi_aa64_vae2is_write }, 5565 #ifndef CONFIG_USER_ONLY 5566 /* Unlike the other EL2-related AT operations, these must 5567 * UNDEF from EL3 if EL2 is not implemented, which is why we 5568 * define them here rather than with the rest of the AT ops. 5569 */ 5570 { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64, 5571 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 5572 .access = PL2_W, .accessfn = at_s1e2_access, 5573 .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, 5574 { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64, 5575 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 5576 .access = PL2_W, .accessfn = at_s1e2_access, 5577 .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, 5578 /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE 5579 * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3 5580 * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose 5581 * to behave as if SCR.NS was 1. 5582 */ 5583 { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 5584 .access = PL2_W, 5585 .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, 5586 { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 5587 .access = PL2_W, 5588 .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, 5589 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, 5590 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, 5591 /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the 5592 * reset values as IMPDEF. We choose to reset to 3 to comply with 5593 * both ARMv7 and ARMv8. 
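* (A reset value of 3 sets the two low bits, CNTHCTL.EL1PCTEN and CNTHCTL.EL1PCEN, so EL1 and EL0 accesses to the physical counter and the EL1 physical timer are not trapped to EL2 out of reset.)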
5594 */ 5595 .access = PL2_RW, .resetvalue = 3, 5596 .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) }, 5597 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, 5598 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, 5599 .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0, 5600 .writefn = gt_cntvoff_write, 5601 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, 5602 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, 5603 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO, 5604 .writefn = gt_cntvoff_write, 5605 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, 5606 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, 5607 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, 5608 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), 5609 .type = ARM_CP_IO, .access = PL2_RW, 5610 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, 5611 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, 5612 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), 5613 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO, 5614 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, 5615 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 5616 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, 5617 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, 5618 .resetfn = gt_hyp_timer_reset, 5619 .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write }, 5620 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, 5621 .type = ARM_CP_IO, 5622 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, 5623 .access = PL2_RW, 5624 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl), 5625 .resetvalue = 0, 5626 .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write }, 5627 #endif 5628 /* The only field of MDCR_EL2 that has a defined architectural reset value 5629 * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we 5630 * don't implement any PMU event counters, so using zero as a reset 5631 * value for MDCR_EL2 is okay 5632 */ 5633 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, 5634 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, 5635 .access = PL2_RW, .resetvalue = 0, 5636 .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), }, 5637 { .name = "HPFAR", .state = ARM_CP_STATE_AA32, 5638 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 5639 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5640 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, 5641 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64, 5642 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 5643 .access = PL2_RW, 5644 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, 5645 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, 5646 .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, 5647 .access = PL2_RW, 5648 .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) }, 5649 REGINFO_SENTINEL 5650 }; 5651 5652 static const ARMCPRegInfo el2_v8_cp_reginfo[] = { 5653 { .name = "HCR2", .state = ARM_CP_STATE_AA32, 5654 .type = ARM_CP_ALIAS | ARM_CP_IO, 5655 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, 5656 .access = PL2_RW, 5657 .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2), 5658 .writefn = hcr_writehigh }, 5659 REGINFO_SENTINEL 5660 }; 5661 5662 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri, 5663 bool isread) 5664 { 5665 /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2. 5666 * At Secure EL1 it traps to EL3. 
5667 */ 5668 if (arm_current_el(env) == 3) { 5669 return CP_ACCESS_OK; 5670 } 5671 if (arm_is_secure_below_el3(env)) { 5672 return CP_ACCESS_TRAP_EL3; 5673 } 5674 /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */ 5675 if (isread) { 5676 return CP_ACCESS_OK; 5677 } 5678 return CP_ACCESS_TRAP_UNCATEGORIZED; 5679 } 5680 5681 static const ARMCPRegInfo el3_cp_reginfo[] = { 5682 { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64, 5683 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0, 5684 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3), 5685 .resetvalue = 0, .writefn = scr_write }, 5686 { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL, 5687 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0, 5688 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 5689 .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3), 5690 .writefn = scr_write }, 5691 { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64, 5692 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1, 5693 .access = PL3_RW, .resetvalue = 0, 5694 .fieldoffset = offsetof(CPUARMState, cp15.sder) }, 5695 { .name = "SDER", 5696 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1, 5697 .access = PL3_RW, .resetvalue = 0, 5698 .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) }, 5699 { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 5700 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 5701 .writefn = vbar_write, .resetvalue = 0, 5702 .fieldoffset = offsetof(CPUARMState, cp15.mvbar) }, 5703 { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64, 5704 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0, 5705 .access = PL3_RW, .resetvalue = 0, 5706 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) }, 5707 { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64, 5708 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2, 5709 .access = PL3_RW, 5710 /* no .writefn needed as this can't cause an ASID change; 5711 * we must provide a .raw_writefn and .resetfn because we handle 5712 * reset and migration for the AArch32 TTBCR(S), which might be 5713 * using mask and base_mask. 
5714 */ 5715 .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write, 5716 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) }, 5717 { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64, 5718 .type = ARM_CP_ALIAS, 5719 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1, 5720 .access = PL3_RW, 5721 .fieldoffset = offsetof(CPUARMState, elr_el[3]) }, 5722 { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64, 5723 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0, 5724 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) }, 5725 { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64, 5726 .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0, 5727 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) }, 5728 { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64, 5729 .type = ARM_CP_ALIAS, 5730 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0, 5731 .access = PL3_RW, 5732 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) }, 5733 { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64, 5734 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0, 5735 .access = PL3_RW, .writefn = vbar_write, 5736 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]), 5737 .resetvalue = 0 }, 5738 { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64, 5739 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2, 5740 .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0, 5741 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) }, 5742 { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64, 5743 .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2, 5744 .access = PL3_RW, .resetvalue = 0, 5745 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) }, 5746 { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64, 5747 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0, 5748 .access = PL3_RW, .type = ARM_CP_CONST, 5749 .resetvalue = 0 }, 5750 { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH, 5751 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0, 5752 .access = PL3_RW, .type = ARM_CP_CONST, 5753 .resetvalue = 0 }, 5754 { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH, 5755 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1, 5756 .access = PL3_RW, .type = ARM_CP_CONST, 5757 .resetvalue = 0 }, 5758 { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64, 5759 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0, 5760 .access = PL3_W, .type = ARM_CP_NO_RAW, 5761 .writefn = tlbi_aa64_alle3is_write }, 5762 { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64, 5763 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1, 5764 .access = PL3_W, .type = ARM_CP_NO_RAW, 5765 .writefn = tlbi_aa64_vae3is_write }, 5766 { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64, 5767 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5, 5768 .access = PL3_W, .type = ARM_CP_NO_RAW, 5769 .writefn = tlbi_aa64_vae3is_write }, 5770 { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64, 5771 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0, 5772 .access = PL3_W, .type = ARM_CP_NO_RAW, 5773 .writefn = tlbi_aa64_alle3_write }, 5774 { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64, 5775 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1, 5776 .access = PL3_W, .type = ARM_CP_NO_RAW, 5777 .writefn = tlbi_aa64_vae3_write }, 5778 { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64, 5779 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5, 5780 .access = PL3_W, .type = ARM_CP_NO_RAW, 5781 .writefn = tlbi_aa64_vae3_write }, 5782 REGINFO_SENTINEL 5783 }; 5784 5785 #ifndef CONFIG_USER_ONLY 5786 /* Test if system register redirection is to 
occur in the current state. */ 5787 static bool redirect_for_e2h(CPUARMState *env) 5788 { 5789 return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H); 5790 } 5791 5792 static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri) 5793 { 5794 CPReadFn *readfn; 5795 5796 if (redirect_for_e2h(env)) { 5797 /* Switch to the saved EL2 version of the register. */ 5798 ri = ri->opaque; 5799 readfn = ri->readfn; 5800 } else { 5801 readfn = ri->orig_readfn; 5802 } 5803 if (readfn == NULL) { 5804 readfn = raw_read; 5805 } 5806 return readfn(env, ri); 5807 } 5808 5809 static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri, 5810 uint64_t value) 5811 { 5812 CPWriteFn *writefn; 5813 5814 if (redirect_for_e2h(env)) { 5815 /* Switch to the saved EL2 version of the register. */ 5816 ri = ri->opaque; 5817 writefn = ri->writefn; 5818 } else { 5819 writefn = ri->orig_writefn; 5820 } 5821 if (writefn == NULL) { 5822 writefn = raw_write; 5823 } 5824 writefn(env, ri, value); 5825 } 5826 5827 static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu) 5828 { 5829 struct E2HAlias { 5830 uint32_t src_key, dst_key, new_key; 5831 const char *src_name, *dst_name, *new_name; 5832 bool (*feature)(const ARMISARegisters *id); 5833 }; 5834 5835 #define K(op0, op1, crn, crm, op2) \ 5836 ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2) 5837 5838 static const struct E2HAlias aliases[] = { 5839 { K(3, 0, 1, 0, 0), K(3, 4, 1, 0, 0), K(3, 5, 1, 0, 0), 5840 "SCTLR", "SCTLR_EL2", "SCTLR_EL12" }, 5841 { K(3, 0, 1, 0, 2), K(3, 4, 1, 1, 2), K(3, 5, 1, 0, 2), 5842 "CPACR", "CPTR_EL2", "CPACR_EL12" }, 5843 { K(3, 0, 2, 0, 0), K(3, 4, 2, 0, 0), K(3, 5, 2, 0, 0), 5844 "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" }, 5845 { K(3, 0, 2, 0, 1), K(3, 4, 2, 0, 1), K(3, 5, 2, 0, 1), 5846 "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" }, 5847 { K(3, 0, 2, 0, 2), K(3, 4, 2, 0, 2), K(3, 5, 2, 0, 2), 5848 "TCR_EL1", "TCR_EL2", "TCR_EL12" }, 5849 { K(3, 0, 4, 0, 0), K(3, 4, 4, 0, 0), K(3, 5, 4, 0, 0), 5850 "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" }, 5851 { K(3, 0, 4, 0, 1), K(3, 4, 4, 0, 1), K(3, 5, 4, 0, 1), 5852 "ELR_EL1", "ELR_EL2", "ELR_EL12" }, 5853 { K(3, 0, 5, 1, 0), K(3, 4, 5, 1, 0), K(3, 5, 5, 1, 0), 5854 "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" }, 5855 { K(3, 0, 5, 1, 1), K(3, 4, 5, 1, 1), K(3, 5, 5, 1, 1), 5856 "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" }, 5857 { K(3, 0, 5, 2, 0), K(3, 4, 5, 2, 0), K(3, 5, 5, 2, 0), 5858 "ESR_EL1", "ESR_EL2", "ESR_EL12" }, 5859 { K(3, 0, 6, 0, 0), K(3, 4, 6, 0, 0), K(3, 5, 6, 0, 0), 5860 "FAR_EL1", "FAR_EL2", "FAR_EL12" }, 5861 { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0), 5862 "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" }, 5863 { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0), 5864 "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" }, 5865 { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0), 5866 "VBAR", "VBAR_EL2", "VBAR_EL12" }, 5867 { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1), 5868 "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" }, 5869 { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0), 5870 "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" }, 5871 5872 /* 5873 * Note that redirection of ZCR is mentioned in the description 5874 * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but 5875 * not in the summary table. 
5876 */ 5877 { K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0), 5878 "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve }, 5879 5880 /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */ 5881 /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */ 5882 }; 5883 #undef K 5884 5885 size_t i; 5886 5887 for (i = 0; i < ARRAY_SIZE(aliases); i++) { 5888 const struct E2HAlias *a = &aliases[i]; 5889 ARMCPRegInfo *src_reg, *dst_reg; 5890 5891 if (a->feature && !a->feature(&cpu->isar)) { 5892 continue; 5893 } 5894 5895 src_reg = g_hash_table_lookup(cpu->cp_regs, &a->src_key); 5896 dst_reg = g_hash_table_lookup(cpu->cp_regs, &a->dst_key); 5897 g_assert(src_reg != NULL); 5898 g_assert(dst_reg != NULL); 5899 5900 /* Cross-compare names to detect typos in the keys. */ 5901 g_assert(strcmp(src_reg->name, a->src_name) == 0); 5902 g_assert(strcmp(dst_reg->name, a->dst_name) == 0); 5903 5904 /* None of the core system registers use opaque; we will. */ 5905 g_assert(src_reg->opaque == NULL); 5906 5907 /* Create alias before redirection so we dup the right data. */ 5908 if (a->new_key) { 5909 ARMCPRegInfo *new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo)); 5910 uint32_t *new_key = g_memdup(&a->new_key, sizeof(uint32_t)); 5911 bool ok; 5912 5913 new_reg->name = a->new_name; 5914 new_reg->type |= ARM_CP_ALIAS; 5915 /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */ 5916 new_reg->access &= PL2_RW | PL3_RW; 5917 5918 ok = g_hash_table_insert(cpu->cp_regs, new_key, new_reg); 5919 g_assert(ok); 5920 } 5921 5922 src_reg->opaque = dst_reg; 5923 src_reg->orig_readfn = src_reg->readfn ?: raw_read; 5924 src_reg->orig_writefn = src_reg->writefn ?: raw_write; 5925 if (!src_reg->raw_readfn) { 5926 src_reg->raw_readfn = raw_read; 5927 } 5928 if (!src_reg->raw_writefn) { 5929 src_reg->raw_writefn = raw_write; 5930 } 5931 src_reg->readfn = el2_e2h_read; 5932 src_reg->writefn = el2_e2h_write; 5933 } 5934 } 5935 #endif 5936 5937 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, 5938 bool isread) 5939 { 5940 int cur_el = arm_current_el(env); 5941 5942 if (cur_el < 2) { 5943 uint64_t hcr = arm_hcr_el2_eff(env); 5944 5945 if (cur_el == 0) { 5946 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 5947 if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) { 5948 return CP_ACCESS_TRAP_EL2; 5949 } 5950 } else { 5951 if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) { 5952 return CP_ACCESS_TRAP; 5953 } 5954 if (hcr & HCR_TID2) { 5955 return CP_ACCESS_TRAP_EL2; 5956 } 5957 } 5958 } else if (hcr & HCR_TID2) { 5959 return CP_ACCESS_TRAP_EL2; 5960 } 5961 } 5962 5963 if (arm_current_el(env) < 2 && arm_hcr_el2_eff(env) & HCR_TID2) { 5964 return CP_ACCESS_TRAP_EL2; 5965 } 5966 5967 return CP_ACCESS_OK; 5968 } 5969 5970 static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri, 5971 uint64_t value) 5972 { 5973 /* Writes to OSLAR_EL1 may update the OS lock status, which can be 5974 * read via a bit in OSLSR_EL1. 5975 */ 5976 int oslock; 5977 5978 if (ri->state == ARM_CP_STATE_AA32) { 5979 oslock = (value == 0xC5ACCE55); 5980 } else { 5981 oslock = value & 1; 5982 } 5983 5984 env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock); 5985 } 5986 5987 static const ARMCPRegInfo debug_cp_reginfo[] = { 5988 /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped 5989 * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1; 5990 * unlike DBGDRAR it is never accessible from EL0. 5991 * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64 5992 * accessor. 
5993 */ 5994 { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0, 5995 .access = PL0_R, .accessfn = access_tdra, 5996 .type = ARM_CP_CONST, .resetvalue = 0 }, 5997 { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64, 5998 .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 5999 .access = PL1_R, .accessfn = access_tdra, 6000 .type = ARM_CP_CONST, .resetvalue = 0 }, 6001 { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 6002 .access = PL0_R, .accessfn = access_tdra, 6003 .type = ARM_CP_CONST, .resetvalue = 0 }, 6004 /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */ 6005 { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH, 6006 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 6007 .access = PL1_RW, .accessfn = access_tda, 6008 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), 6009 .resetvalue = 0 }, 6010 /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1. 6011 * We don't implement the configurable EL0 access. 6012 */ 6013 { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH, 6014 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 6015 .type = ARM_CP_ALIAS, 6016 .access = PL1_R, .accessfn = access_tda, 6017 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), }, 6018 { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH, 6019 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4, 6020 .access = PL1_W, .type = ARM_CP_NO_RAW, 6021 .accessfn = access_tdosa, 6022 .writefn = oslar_write }, 6023 { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH, 6024 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4, 6025 .access = PL1_R, .resetvalue = 10, 6026 .accessfn = access_tdosa, 6027 .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) }, 6028 /* Dummy OSDLR_EL1: 32-bit Linux will read this */ 6029 { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH, 6030 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4, 6031 .access = PL1_RW, .accessfn = access_tdosa, 6032 .type = ARM_CP_NOP }, 6033 /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't 6034 * implement vector catch debug events yet. 6035 */ 6036 { .name = "DBGVCR", 6037 .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 6038 .access = PL1_RW, .accessfn = access_tda, 6039 .type = ARM_CP_NOP }, 6040 /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor 6041 * to save and restore a 32-bit guest's DBGVCR) 6042 */ 6043 { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, 6044 .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, 6045 .access = PL2_RW, .accessfn = access_tda, 6046 .type = ARM_CP_NOP }, 6047 /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications 6048 * Channel but Linux may try to access this register. The 32-bit 6049 * alias is DBGDCCINT. 
6050 */ 6051 { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH, 6052 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 6053 .access = PL1_RW, .accessfn = access_tda, 6054 .type = ARM_CP_NOP }, 6055 REGINFO_SENTINEL 6056 }; 6057 6058 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { 6059 /* 64 bit access versions of the (dummy) debug registers */ 6060 { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0, 6061 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, 6062 { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0, 6063 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, 6064 REGINFO_SENTINEL 6065 }; 6066 6067 /* Return the exception level to which exceptions should be taken 6068 * via SVEAccessTrap. If an exception should be routed through 6069 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should 6070 * take care of raising that exception. 6071 * C.f. the ARM pseudocode function CheckSVEEnabled. 6072 */ 6073 int sve_exception_el(CPUARMState *env, int el) 6074 { 6075 #ifndef CONFIG_USER_ONLY 6076 uint64_t hcr_el2 = arm_hcr_el2_eff(env); 6077 6078 if (el <= 1 && (hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { 6079 bool disabled = false; 6080 6081 /* The CPACR.ZEN controls traps to EL1: 6082 * 0, 2 : trap EL0 and EL1 accesses 6083 * 1 : trap only EL0 accesses 6084 * 3 : trap no accesses 6085 */ 6086 if (!extract32(env->cp15.cpacr_el1, 16, 1)) { 6087 disabled = true; 6088 } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) { 6089 disabled = el == 0; 6090 } 6091 if (disabled) { 6092 /* route_to_el2 */ 6093 return hcr_el2 & HCR_TGE ? 2 : 1; 6094 } 6095 6096 /* Check CPACR.FPEN. */ 6097 if (!extract32(env->cp15.cpacr_el1, 20, 1)) { 6098 disabled = true; 6099 } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) { 6100 disabled = el == 0; 6101 } 6102 if (disabled) { 6103 return 0; 6104 } 6105 } 6106 6107 /* CPTR_EL2. Since TZ and TFP are positive, 6108 * they will be zero when EL2 is not present. 6109 */ 6110 if (el <= 2 && !arm_is_secure_below_el3(env)) { 6111 if (env->cp15.cptr_el[2] & CPTR_TZ) { 6112 return 2; 6113 } 6114 if (env->cp15.cptr_el[2] & CPTR_TFP) { 6115 return 0; 6116 } 6117 } 6118 6119 /* CPTR_EL3. Since EZ is negative we must check for EL3. */ 6120 if (arm_feature(env, ARM_FEATURE_EL3) 6121 && !(env->cp15.cptr_el[3] & CPTR_EZ)) { 6122 return 3; 6123 } 6124 #endif 6125 return 0; 6126 } 6127 6128 static uint32_t sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len) 6129 { 6130 uint32_t end_len; 6131 6132 end_len = start_len &= 0xf; 6133 if (!test_bit(start_len, cpu->sve_vq_map)) { 6134 end_len = find_last_bit(cpu->sve_vq_map, start_len); 6135 assert(end_len < start_len); 6136 } 6137 return end_len; 6138 } 6139 6140 /* 6141 * Given that SVE is enabled, return the vector length for EL. 
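* The result uses the same encoding as ZCR_ELx.LEN: the number of 128-bit quadwords minus 1, so e.g. a return value of 3 corresponds to a 512-bit vector length.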
6142 */ 6143 uint32_t sve_zcr_len_for_el(CPUARMState *env, int el) 6144 { 6145 ARMCPU *cpu = env_archcpu(env); 6146 uint32_t zcr_len = cpu->sve_max_vq - 1; 6147 6148 if (el <= 1) { 6149 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]); 6150 } 6151 if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) { 6152 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]); 6153 } 6154 if (arm_feature(env, ARM_FEATURE_EL3)) { 6155 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]); 6156 } 6157 6158 return sve_zcr_get_valid_len(cpu, zcr_len); 6159 } 6160 6161 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6162 uint64_t value) 6163 { 6164 int cur_el = arm_current_el(env); 6165 int old_len = sve_zcr_len_for_el(env, cur_el); 6166 int new_len; 6167 6168 /* Bits other than [3:0] are RAZ/WI. */ 6169 QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16); 6170 raw_write(env, ri, value & 0xf); 6171 6172 /* 6173 * Because we arrived here, we know both FP and SVE are enabled; 6174 * otherwise we would have trapped access to the ZCR_ELn register. 6175 */ 6176 new_len = sve_zcr_len_for_el(env, cur_el); 6177 if (new_len < old_len) { 6178 aarch64_sve_narrow_vq(env, new_len + 1); 6179 } 6180 } 6181 6182 static const ARMCPRegInfo zcr_el1_reginfo = { 6183 .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64, 6184 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0, 6185 .access = PL1_RW, .type = ARM_CP_SVE, 6186 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]), 6187 .writefn = zcr_write, .raw_writefn = raw_write 6188 }; 6189 6190 static const ARMCPRegInfo zcr_el2_reginfo = { 6191 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, 6192 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, 6193 .access = PL2_RW, .type = ARM_CP_SVE, 6194 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]), 6195 .writefn = zcr_write, .raw_writefn = raw_write 6196 }; 6197 6198 static const ARMCPRegInfo zcr_no_el2_reginfo = { 6199 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, 6200 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, 6201 .access = PL2_RW, .type = ARM_CP_SVE, 6202 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore 6203 }; 6204 6205 static const ARMCPRegInfo zcr_el3_reginfo = { 6206 .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64, 6207 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0, 6208 .access = PL3_RW, .type = ARM_CP_SVE, 6209 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]), 6210 .writefn = zcr_write, .raw_writefn = raw_write 6211 }; 6212 6213 void hw_watchpoint_update(ARMCPU *cpu, int n) 6214 { 6215 CPUARMState *env = &cpu->env; 6216 vaddr len = 0; 6217 vaddr wvr = env->cp15.dbgwvr[n]; 6218 uint64_t wcr = env->cp15.dbgwcr[n]; 6219 int mask; 6220 int flags = BP_CPU | BP_STOP_BEFORE_ACCESS; 6221 6222 if (env->cpu_watchpoint[n]) { 6223 cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]); 6224 env->cpu_watchpoint[n] = NULL; 6225 } 6226 6227 if (!extract64(wcr, 0, 1)) { 6228 /* E bit clear : watchpoint disabled */ 6229 return; 6230 } 6231 6232 switch (extract64(wcr, 3, 2)) { 6233 case 0: 6234 /* LSC 00 is reserved and must behave as if the wp is disabled */ 6235 return; 6236 case 1: 6237 flags |= BP_MEM_READ; 6238 break; 6239 case 2: 6240 flags |= BP_MEM_WRITE; 6241 break; 6242 case 3: 6243 flags |= BP_MEM_ACCESS; 6244 break; 6245 } 6246 6247 /* Attempts to use both MASK and BAS fields simultaneously are 6248 * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case, 6249 * thus generating a watchpoint for every byte in the masked region. 
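* For example, MASK = 3 watches the naturally aligned 8-byte region at WVR & ~7, whatever BAS contains.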
6250 */ 6251 mask = extract64(wcr, 24, 4); 6252 if (mask == 1 || mask == 2) { 6253 /* Reserved values of MASK; we must act as if the mask value was 6254 * some non-reserved value, or as if the watchpoint were disabled. 6255 * We choose the latter. 6256 */ 6257 return; 6258 } else if (mask) { 6259 /* Watchpoint covers an aligned area up to 2GB in size */ 6260 len = 1ULL << mask; 6261 /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE 6262 * whether the watchpoint fires when the unmasked bits match; we opt 6263 * to generate the exceptions. 6264 */ 6265 wvr &= ~(len - 1); 6266 } else { 6267 /* Watchpoint covers bytes defined by the byte address select bits */ 6268 int bas = extract64(wcr, 5, 8); 6269 int basstart; 6270 6271 if (extract64(wvr, 2, 1)) { 6272 /* Deprecated case of an only 4-aligned address. BAS[7:4] are 6273 * ignored, and BAS[3:0] define which bytes to watch. 6274 */ 6275 bas &= 0xf; 6276 } 6277 6278 if (bas == 0) { 6279 /* This must act as if the watchpoint is disabled */ 6280 return; 6281 } 6282 6283 /* The BAS bits are supposed to be programmed to indicate a contiguous 6284 * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether 6285 * we fire for each byte in the word/doubleword addressed by the WVR. 6286 * We choose to ignore any non-zero bits after the first range of 1s. 6287 */ 6288 basstart = ctz32(bas); 6289 len = cto32(bas >> basstart); 6290 wvr += basstart; 6291 } 6292 6293 cpu_watchpoint_insert(CPU(cpu), wvr, len, flags, 6294 &env->cpu_watchpoint[n]); 6295 } 6296 6297 void hw_watchpoint_update_all(ARMCPU *cpu) 6298 { 6299 int i; 6300 CPUARMState *env = &cpu->env; 6301 6302 /* Completely clear out existing QEMU watchpoints and our array, to 6303 * avoid possible stale entries following migration load. 6304 */ 6305 cpu_watchpoint_remove_all(CPU(cpu), BP_CPU); 6306 memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint)); 6307 6308 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) { 6309 hw_watchpoint_update(cpu, i); 6310 } 6311 } 6312 6313 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6314 uint64_t value) 6315 { 6316 ARMCPU *cpu = env_archcpu(env); 6317 int i = ri->crm; 6318 6319 /* Bits [63:49] are hardwired to the value of bit [48]; that is, the 6320 * register reads and behaves as if values written are sign extended. 6321 * Bits [1:0] are RES0. 
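* For example, a write of 0x0001234500000004 is stored back as 0xffff234500000004, and writes to bits [1:0] are discarded, so writing 0x3 stores 0.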
6322 */ 6323 value = sextract64(value, 0, 49) & ~3ULL; 6324 6325 raw_write(env, ri, value); 6326 hw_watchpoint_update(cpu, i); 6327 } 6328 6329 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6330 uint64_t value) 6331 { 6332 ARMCPU *cpu = env_archcpu(env); 6333 int i = ri->crm; 6334 6335 raw_write(env, ri, value); 6336 hw_watchpoint_update(cpu, i); 6337 } 6338 6339 void hw_breakpoint_update(ARMCPU *cpu, int n) 6340 { 6341 CPUARMState *env = &cpu->env; 6342 uint64_t bvr = env->cp15.dbgbvr[n]; 6343 uint64_t bcr = env->cp15.dbgbcr[n]; 6344 vaddr addr; 6345 int bt; 6346 int flags = BP_CPU; 6347 6348 if (env->cpu_breakpoint[n]) { 6349 cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]); 6350 env->cpu_breakpoint[n] = NULL; 6351 } 6352 6353 if (!extract64(bcr, 0, 1)) { 6354 /* E bit clear : breakpoint disabled */ 6355 return; 6356 } 6357 6358 bt = extract64(bcr, 20, 4); 6359 6360 switch (bt) { 6361 case 4: /* unlinked address mismatch (reserved if AArch64) */ 6362 case 5: /* linked address mismatch (reserved if AArch64) */ 6363 qemu_log_mask(LOG_UNIMP, 6364 "arm: address mismatch breakpoint types not implemented\n"); 6365 return; 6366 case 0: /* unlinked address match */ 6367 case 1: /* linked address match */ 6368 { 6369 /* Bits [63:49] are hardwired to the value of bit [48]; that is, 6370 * we behave as if the register was sign extended. Bits [1:0] are 6371 * RES0. The BAS field is used to allow setting breakpoints on 16 6372 * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether 6373 * a bp will fire if the addresses covered by the bp and the addresses 6374 * covered by the insn overlap but the insn doesn't start at the 6375 * start of the bp address range. We choose to require the insn and 6376 * the bp to have the same address. The constraints on writing to 6377 * BAS enforced in dbgbcr_write mean we have only four cases: 6378 * 0b0000 => no breakpoint 6379 * 0b0011 => breakpoint on addr 6380 * 0b1100 => breakpoint on addr + 2 6381 * 0b1111 => breakpoint on addr 6382 * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c). 6383 */ 6384 int bas = extract64(bcr, 5, 4); 6385 addr = sextract64(bvr, 0, 49) & ~3ULL; 6386 if (bas == 0) { 6387 return; 6388 } 6389 if (bas == 0xc) { 6390 addr += 2; 6391 } 6392 break; 6393 } 6394 case 2: /* unlinked context ID match */ 6395 case 8: /* unlinked VMID match (reserved if no EL2) */ 6396 case 10: /* unlinked context ID and VMID match (reserved if no EL2) */ 6397 qemu_log_mask(LOG_UNIMP, 6398 "arm: unlinked context breakpoint types not implemented\n"); 6399 return; 6400 case 9: /* linked VMID match (reserved if no EL2) */ 6401 case 11: /* linked context ID and VMID match (reserved if no EL2) */ 6402 case 3: /* linked context ID match */ 6403 default: 6404 /* We must generate no events for linked context matches (unless 6405 * they are linked to by some other bp/wp, which is handled in 6406 * updates for the linking bp/wp). We choose to also generate no events 6407 * for reserved values. 6408 */ 6409 return; 6410 } 6411 6412 cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]); 6413 } 6414 6415 void hw_breakpoint_update_all(ARMCPU *cpu) 6416 { 6417 int i; 6418 CPUARMState *env = &cpu->env; 6419 6420 /* Completely clear out existing QEMU breakpoints and our array, to 6421 * avoid possible stale entries following migration load.
6422 */ 6423 cpu_breakpoint_remove_all(CPU(cpu), BP_CPU); 6424 memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint)); 6425 6426 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) { 6427 hw_breakpoint_update(cpu, i); 6428 } 6429 } 6430 6431 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6432 uint64_t value) 6433 { 6434 ARMCPU *cpu = env_archcpu(env); 6435 int i = ri->crm; 6436 6437 raw_write(env, ri, value); 6438 hw_breakpoint_update(cpu, i); 6439 } 6440 6441 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6442 uint64_t value) 6443 { 6444 ARMCPU *cpu = env_archcpu(env); 6445 int i = ri->crm; 6446 6447 /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only 6448 * copy of BAS[0]. 6449 */ 6450 value = deposit64(value, 6, 1, extract64(value, 5, 1)); 6451 value = deposit64(value, 8, 1, extract64(value, 7, 1)); 6452 6453 raw_write(env, ri, value); 6454 hw_breakpoint_update(cpu, i); 6455 } 6456 6457 static void define_debug_regs(ARMCPU *cpu) 6458 { 6459 /* Define v7 and v8 architectural debug registers. 6460 * These are just dummy implementations for now. 6461 */ 6462 int i; 6463 int wrps, brps, ctx_cmps; 6464 ARMCPRegInfo dbgdidr = { 6465 .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, 6466 .access = PL0_R, .accessfn = access_tda, 6467 .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr, 6468 }; 6469 6470 /* Note that all these register fields hold "number of Xs minus 1". */ 6471 brps = arm_num_brps(cpu); 6472 wrps = arm_num_wrps(cpu); 6473 ctx_cmps = arm_num_ctx_cmps(cpu); 6474 6475 assert(ctx_cmps <= brps); 6476 6477 define_one_arm_cp_reg(cpu, &dbgdidr); 6478 define_arm_cp_regs(cpu, debug_cp_reginfo); 6479 6480 if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { 6481 define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); 6482 } 6483 6484 for (i = 0; i < brps; i++) { 6485 ARMCPRegInfo dbgregs[] = { 6486 { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH, 6487 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4, 6488 .access = PL1_RW, .accessfn = access_tda, 6489 .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]), 6490 .writefn = dbgbvr_write, .raw_writefn = raw_write 6491 }, 6492 { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH, 6493 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5, 6494 .access = PL1_RW, .accessfn = access_tda, 6495 .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]), 6496 .writefn = dbgbcr_write, .raw_writefn = raw_write 6497 }, 6498 REGINFO_SENTINEL 6499 }; 6500 define_arm_cp_regs(cpu, dbgregs); 6501 } 6502 6503 for (i = 0; i < wrps; i++) { 6504 ARMCPRegInfo dbgregs[] = { 6505 { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH, 6506 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6, 6507 .access = PL1_RW, .accessfn = access_tda, 6508 .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]), 6509 .writefn = dbgwvr_write, .raw_writefn = raw_write 6510 }, 6511 { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH, 6512 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7, 6513 .access = PL1_RW, .accessfn = access_tda, 6514 .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]), 6515 .writefn = dbgwcr_write, .raw_writefn = raw_write 6516 }, 6517 REGINFO_SENTINEL 6518 }; 6519 define_arm_cp_regs(cpu, dbgregs); 6520 } 6521 } 6522 6523 static void define_pmu_regs(ARMCPU *cpu) 6524 { 6525 /* 6526 * v7 performance monitor control register: same implementor 6527 * field as main ID register, and we implement four counters in 6528 * addition to the cycle count register. 
6529 */ 6530 unsigned int i, pmcrn = 4; 6531 ARMCPRegInfo pmcr = { 6532 .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0, 6533 .access = PL0_RW, 6534 .type = ARM_CP_IO | ARM_CP_ALIAS, 6535 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr), 6536 .accessfn = pmreg_access, .writefn = pmcr_write, 6537 .raw_writefn = raw_write, 6538 }; 6539 ARMCPRegInfo pmcr64 = { 6540 .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64, 6541 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0, 6542 .access = PL0_RW, .accessfn = pmreg_access, 6543 .type = ARM_CP_IO, 6544 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), 6545 .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT) | 6546 PMCRLC, 6547 .writefn = pmcr_write, .raw_writefn = raw_write, 6548 }; 6549 define_one_arm_cp_reg(cpu, &pmcr); 6550 define_one_arm_cp_reg(cpu, &pmcr64); 6551 for (i = 0; i < pmcrn; i++) { 6552 char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i); 6553 char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i); 6554 char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i); 6555 char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i); 6556 ARMCPRegInfo pmev_regs[] = { 6557 { .name = pmevcntr_name, .cp = 15, .crn = 14, 6558 .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, 6559 .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, 6560 .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, 6561 .accessfn = pmreg_access }, 6562 { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64, 6563 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)), 6564 .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, 6565 .type = ARM_CP_IO, 6566 .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, 6567 .raw_readfn = pmevcntr_rawread, 6568 .raw_writefn = pmevcntr_rawwrite }, 6569 { .name = pmevtyper_name, .cp = 15, .crn = 14, 6570 .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, 6571 .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, 6572 .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, 6573 .accessfn = pmreg_access }, 6574 { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64, 6575 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)), 6576 .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, 6577 .type = ARM_CP_IO, 6578 .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, 6579 .raw_writefn = pmevtyper_rawwrite }, 6580 REGINFO_SENTINEL 6581 }; 6582 define_arm_cp_regs(cpu, pmev_regs); 6583 g_free(pmevcntr_name); 6584 g_free(pmevcntr_el0_name); 6585 g_free(pmevtyper_name); 6586 g_free(pmevtyper_el0_name); 6587 } 6588 if (cpu_isar_feature(aa32_pmu_8_1, cpu)) { 6589 ARMCPRegInfo v81_pmu_regs[] = { 6590 { .name = "PMCEID2", .state = ARM_CP_STATE_AA32, 6591 .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4, 6592 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 6593 .resetvalue = extract64(cpu->pmceid0, 32, 32) }, 6594 { .name = "PMCEID3", .state = ARM_CP_STATE_AA32, 6595 .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5, 6596 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 6597 .resetvalue = extract64(cpu->pmceid1, 32, 32) }, 6598 REGINFO_SENTINEL 6599 }; 6600 define_arm_cp_regs(cpu, v81_pmu_regs); 6601 } 6602 if (cpu_isar_feature(any_pmu_8_4, cpu)) { 6603 static const ARMCPRegInfo v84_pmmir = { 6604 .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH, 6605 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6, 6606 .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 6607 .resetvalue = 0 6608 }; 6609 
        define_one_arm_cp_reg(cpu, &v84_pmmir);
    }
}

/* We don't know until after realize whether there's a GICv3
 * attached, and that is what registers the gicv3 sysregs.
 * So we have to fill in the GIC fields in ID_PFR1/ID_PFR1_EL1 and
 * ID_AA64PFR0_EL1 at runtime.
 */
static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t pfr1 = cpu->id_pfr1;

    if (env->gicv3state) {
        pfr1 |= 1 << 28;
    }
    return pfr1;
}

#ifndef CONFIG_USER_ONLY
static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t pfr0 = cpu->isar.id_aa64pfr0;

    if (env->gicv3state) {
        pfr0 |= 1 << 24;
    }
    return pfr0;
}
#endif

/* Shared logic between LORID and the rest of the LOR* registers.
 * Secure state has already been dealt with.
 */
static CPAccessResult access_lor_ns(CPUARMState *env)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access ok in secure mode. */
        return CP_ACCESS_OK;
    }
    return access_lor_ns(env);
}

static CPAccessResult access_lor_other(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access denied in secure mode. */
        return CP_ACCESS_TRAP;
    }
    return access_lor_ns(env);
}

/*
 * A trivial implementation of ARMv8.1-LOR leaves all of these
 * registers fixed at 0, which indicates that there are zero
 * supported Limited Ordering regions.
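 * (In other words, LORID_EL1 reading as zero advertises no LORegions or
 * descriptors, so the remaining LOR* registers may validly be implemented
 * as RES0 constants.)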
6682 */ 6683 static const ARMCPRegInfo lor_reginfo[] = { 6684 { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64, 6685 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0, 6686 .access = PL1_RW, .accessfn = access_lor_other, 6687 .type = ARM_CP_CONST, .resetvalue = 0 }, 6688 { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64, 6689 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1, 6690 .access = PL1_RW, .accessfn = access_lor_other, 6691 .type = ARM_CP_CONST, .resetvalue = 0 }, 6692 { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64, 6693 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2, 6694 .access = PL1_RW, .accessfn = access_lor_other, 6695 .type = ARM_CP_CONST, .resetvalue = 0 }, 6696 { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64, 6697 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3, 6698 .access = PL1_RW, .accessfn = access_lor_other, 6699 .type = ARM_CP_CONST, .resetvalue = 0 }, 6700 { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64, 6701 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7, 6702 .access = PL1_R, .accessfn = access_lorid, 6703 .type = ARM_CP_CONST, .resetvalue = 0 }, 6704 REGINFO_SENTINEL 6705 }; 6706 6707 #ifdef TARGET_AARCH64 6708 static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri, 6709 bool isread) 6710 { 6711 int el = arm_current_el(env); 6712 6713 if (el < 2 && 6714 arm_feature(env, ARM_FEATURE_EL2) && 6715 !(arm_hcr_el2_eff(env) & HCR_APK)) { 6716 return CP_ACCESS_TRAP_EL2; 6717 } 6718 if (el < 3 && 6719 arm_feature(env, ARM_FEATURE_EL3) && 6720 !(env->cp15.scr_el3 & SCR_APK)) { 6721 return CP_ACCESS_TRAP_EL3; 6722 } 6723 return CP_ACCESS_OK; 6724 } 6725 6726 static const ARMCPRegInfo pauth_reginfo[] = { 6727 { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6728 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0, 6729 .access = PL1_RW, .accessfn = access_pauth, 6730 .fieldoffset = offsetof(CPUARMState, keys.apda.lo) }, 6731 { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6732 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1, 6733 .access = PL1_RW, .accessfn = access_pauth, 6734 .fieldoffset = offsetof(CPUARMState, keys.apda.hi) }, 6735 { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6736 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2, 6737 .access = PL1_RW, .accessfn = access_pauth, 6738 .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) }, 6739 { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6740 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3, 6741 .access = PL1_RW, .accessfn = access_pauth, 6742 .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) }, 6743 { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6744 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0, 6745 .access = PL1_RW, .accessfn = access_pauth, 6746 .fieldoffset = offsetof(CPUARMState, keys.apga.lo) }, 6747 { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6748 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1, 6749 .access = PL1_RW, .accessfn = access_pauth, 6750 .fieldoffset = offsetof(CPUARMState, keys.apga.hi) }, 6751 { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6752 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0, 6753 .access = PL1_RW, .accessfn = access_pauth, 6754 .fieldoffset = offsetof(CPUARMState, keys.apia.lo) }, 6755 { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6756 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1, 6757 .access = PL1_RW, .accessfn = access_pauth, 6758 .fieldoffset = offsetof(CPUARMState, keys.apia.hi) }, 6759 { .name = "APIBKEYLO_EL1", .state = 
ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
    { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
    REGINFO_SENTINEL
};

static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    Error *err = NULL;
    uint64_t ret;

    /* Success sets NZCV = 0000. */
    env->NF = env->CF = env->VF = 0, env->ZF = 1;

    if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
        /*
         * ??? Failed, for unknown reasons in the crypto subsystem.
         * The best we can do is log the reason and return the
         * timed-out indication to the guest. There is no reason
         * we know to expect this failure to be transitory, so the
         * guest may well hang retrying the operation.
         */
        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
                      ri->name, error_get_pretty(err));
        error_free(err);

        env->ZF = 0; /* NZCV = 0100 */
        return 0;
    }
    return ret;
}

/* We do not support re-seeding, so the two registers operate the same. */
static const ARMCPRegInfo rndr_reginfo[] = {
    { .name = "RNDR", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
      .access = PL0_R, .readfn = rndr_readfn },
    { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
      .access = PL0_R, .readfn = rndr_readfn },
    REGINFO_SENTINEL
};

#ifndef CONFIG_USER_ONLY
static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
                           uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    /* CTR_EL0 System register -> DminLine, bits [19:16] */
    uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
    uint64_t vaddr_in = (uint64_t) value;
    uint64_t vaddr = vaddr_in & ~(dline_size - 1);
    void *haddr;
    int mem_idx = cpu_mmu_index(env, false);

    /* This won't be crossing page boundaries */
    haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
    if (haddr) {

        ram_addr_t offset;
        MemoryRegion *mr;

        /* RCU lock is already being held */
        mr = memory_region_from_host(haddr, &offset);

        if (mr) {
            memory_region_do_writeback(mr, offset, dline_size);
        }
    }
}

static const ARMCPRegInfo dcpop_reg[] = {
    { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
      .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo dcpodp_reg[] = {
    { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
      .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
    REGINFO_SENTINEL
};
#endif /*CONFIG_USER_ONLY*/

#endif

static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    int el =
arm_current_el(env); 6860 6861 if (el == 0) { 6862 uint64_t sctlr = arm_sctlr(env, el); 6863 if (!(sctlr & SCTLR_EnRCTX)) { 6864 return CP_ACCESS_TRAP; 6865 } 6866 } else if (el == 1) { 6867 uint64_t hcr = arm_hcr_el2_eff(env); 6868 if (hcr & HCR_NV) { 6869 return CP_ACCESS_TRAP_EL2; 6870 } 6871 } 6872 return CP_ACCESS_OK; 6873 } 6874 6875 static const ARMCPRegInfo predinv_reginfo[] = { 6876 { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64, 6877 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4, 6878 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 6879 { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64, 6880 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5, 6881 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 6882 { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64, 6883 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7, 6884 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 6885 /* 6886 * Note the AArch32 opcodes have a different OPC1. 6887 */ 6888 { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32, 6889 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4, 6890 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 6891 { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32, 6892 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5, 6893 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 6894 { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32, 6895 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7, 6896 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 6897 REGINFO_SENTINEL 6898 }; 6899 6900 static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri) 6901 { 6902 /* Read the high 32 bits of the current CCSIDR */ 6903 return extract64(ccsidr_read(env, ri), 32, 32); 6904 } 6905 6906 static const ARMCPRegInfo ccsidr2_reginfo[] = { 6907 { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH, 6908 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2, 6909 .access = PL1_R, 6910 .accessfn = access_aa64_tid2, 6911 .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW }, 6912 REGINFO_SENTINEL 6913 }; 6914 6915 static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri, 6916 bool isread) 6917 { 6918 if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) { 6919 return CP_ACCESS_TRAP_EL2; 6920 } 6921 6922 return CP_ACCESS_OK; 6923 } 6924 6925 static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri, 6926 bool isread) 6927 { 6928 if (arm_feature(env, ARM_FEATURE_V8)) { 6929 return access_aa64_tid3(env, ri, isread); 6930 } 6931 6932 return CP_ACCESS_OK; 6933 } 6934 6935 static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri, 6936 bool isread) 6937 { 6938 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) { 6939 return CP_ACCESS_TRAP_EL2; 6940 } 6941 6942 return CP_ACCESS_OK; 6943 } 6944 6945 static const ARMCPRegInfo jazelle_regs[] = { 6946 { .name = "JIDR", 6947 .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0, 6948 .access = PL1_R, .accessfn = access_jazelle, 6949 .type = ARM_CP_CONST, .resetvalue = 0 }, 6950 { .name = "JOSCR", 6951 .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0, 6952 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 6953 { .name = "JMCR", 6954 .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0, 6955 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 6956 REGINFO_SENTINEL 6957 }; 6958 6959 static const ARMCPRegInfo vhe_reginfo[] = { 6960 { .name = "CONTEXTIDR_EL2", .state = 
ARM_CP_STATE_AA64, 6961 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1, 6962 .access = PL2_RW, 6963 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) }, 6964 { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64, 6965 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1, 6966 .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write, 6967 .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) }, 6968 #ifndef CONFIG_USER_ONLY 6969 { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64, 6970 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2, 6971 .fieldoffset = 6972 offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval), 6973 .type = ARM_CP_IO, .access = PL2_RW, 6974 .writefn = gt_hv_cval_write, .raw_writefn = raw_write }, 6975 { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 6976 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0, 6977 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, 6978 .resetfn = gt_hv_timer_reset, 6979 .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write }, 6980 { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH, 6981 .type = ARM_CP_IO, 6982 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1, 6983 .access = PL2_RW, 6984 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl), 6985 .writefn = gt_hv_ctl_write, .raw_writefn = raw_write }, 6986 { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64, 6987 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1, 6988 .type = ARM_CP_IO | ARM_CP_ALIAS, 6989 .access = PL2_RW, .accessfn = e2h_access, 6990 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), 6991 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write }, 6992 { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64, 6993 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1, 6994 .type = ARM_CP_IO | ARM_CP_ALIAS, 6995 .access = PL2_RW, .accessfn = e2h_access, 6996 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), 6997 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write }, 6998 { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64, 6999 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0, 7000 .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS, 7001 .access = PL2_RW, .accessfn = e2h_access, 7002 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write }, 7003 { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64, 7004 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0, 7005 .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS, 7006 .access = PL2_RW, .accessfn = e2h_access, 7007 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write }, 7008 { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64, 7009 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2, 7010 .type = ARM_CP_IO | ARM_CP_ALIAS, 7011 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 7012 .access = PL2_RW, .accessfn = e2h_access, 7013 .writefn = gt_phys_cval_write, .raw_writefn = raw_write }, 7014 { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64, 7015 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2, 7016 .type = ARM_CP_IO | ARM_CP_ALIAS, 7017 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 7018 .access = PL2_RW, .accessfn = e2h_access, 7019 .writefn = gt_virt_cval_write, .raw_writefn = raw_write }, 7020 #endif 7021 REGINFO_SENTINEL 7022 }; 7023 7024 #ifndef CONFIG_USER_ONLY 7025 static const ARMCPRegInfo ats1e1_reginfo[] = { 7026 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, 7027 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0, 7028 .access = PL1_W, 
.type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 7029 .writefn = ats_write64 }, 7030 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, 7031 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, 7032 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 7033 .writefn = ats_write64 }, 7034 REGINFO_SENTINEL 7035 }; 7036 7037 static const ARMCPRegInfo ats1cp_reginfo[] = { 7038 { .name = "ATS1CPRP", 7039 .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0, 7040 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 7041 .writefn = ats_write }, 7042 { .name = "ATS1CPWP", 7043 .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, 7044 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 7045 .writefn = ats_write }, 7046 REGINFO_SENTINEL 7047 }; 7048 #endif 7049 7050 /* 7051 * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and 7052 * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field 7053 * is non-zero, which is never for ARMv7, optionally in ARMv8 7054 * and mandatorily for ARMv8.2 and up. 7055 * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's 7056 * implementation is RAZ/WI we can ignore this detail, as we 7057 * do for ACTLR. 7058 */ 7059 static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = { 7060 { .name = "ACTLR2", .state = ARM_CP_STATE_AA32, 7061 .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3, 7062 .access = PL1_RW, .accessfn = access_tacr, 7063 .type = ARM_CP_CONST, .resetvalue = 0 }, 7064 { .name = "HACTLR2", .state = ARM_CP_STATE_AA32, 7065 .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3, 7066 .access = PL2_RW, .type = ARM_CP_CONST, 7067 .resetvalue = 0 }, 7068 REGINFO_SENTINEL 7069 }; 7070 7071 void register_cp_regs_for_features(ARMCPU *cpu) 7072 { 7073 /* Register all the coprocessor registers based on feature bits */ 7074 CPUARMState *env = &cpu->env; 7075 if (arm_feature(env, ARM_FEATURE_M)) { 7076 /* M profile has no coprocessor registers */ 7077 return; 7078 } 7079 7080 define_arm_cp_regs(cpu, cp_reginfo); 7081 if (!arm_feature(env, ARM_FEATURE_V8)) { 7082 /* Must go early as it is full of wildcards that may be 7083 * overridden by later definitions. 7084 */ 7085 define_arm_cp_regs(cpu, not_v8_cp_reginfo); 7086 } 7087 7088 if (arm_feature(env, ARM_FEATURE_V6)) { 7089 /* The ID registers all have impdef reset values */ 7090 ARMCPRegInfo v6_idregs[] = { 7091 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH, 7092 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 7093 .access = PL1_R, .type = ARM_CP_CONST, 7094 .accessfn = access_aa32_tid3, 7095 .resetvalue = cpu->id_pfr0 }, 7096 /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know 7097 * the value of the GIC field until after we define these regs. 
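             * (id_pfr1_read() above fills in that GIC field at read time,
             * once any GICv3 device has attached its state.)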
7098 */ 7099 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH, 7100 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1, 7101 .access = PL1_R, .type = ARM_CP_NO_RAW, 7102 .accessfn = access_aa32_tid3, 7103 .readfn = id_pfr1_read, 7104 .writefn = arm_cp_write_ignore }, 7105 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH, 7106 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2, 7107 .access = PL1_R, .type = ARM_CP_CONST, 7108 .accessfn = access_aa32_tid3, 7109 .resetvalue = cpu->isar.id_dfr0 }, 7110 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH, 7111 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3, 7112 .access = PL1_R, .type = ARM_CP_CONST, 7113 .accessfn = access_aa32_tid3, 7114 .resetvalue = cpu->id_afr0 }, 7115 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH, 7116 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4, 7117 .access = PL1_R, .type = ARM_CP_CONST, 7118 .accessfn = access_aa32_tid3, 7119 .resetvalue = cpu->isar.id_mmfr0 }, 7120 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH, 7121 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5, 7122 .access = PL1_R, .type = ARM_CP_CONST, 7123 .accessfn = access_aa32_tid3, 7124 .resetvalue = cpu->isar.id_mmfr1 }, 7125 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH, 7126 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6, 7127 .access = PL1_R, .type = ARM_CP_CONST, 7128 .accessfn = access_aa32_tid3, 7129 .resetvalue = cpu->isar.id_mmfr2 }, 7130 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH, 7131 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7, 7132 .access = PL1_R, .type = ARM_CP_CONST, 7133 .accessfn = access_aa32_tid3, 7134 .resetvalue = cpu->isar.id_mmfr3 }, 7135 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH, 7136 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 7137 .access = PL1_R, .type = ARM_CP_CONST, 7138 .accessfn = access_aa32_tid3, 7139 .resetvalue = cpu->isar.id_isar0 }, 7140 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH, 7141 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1, 7142 .access = PL1_R, .type = ARM_CP_CONST, 7143 .accessfn = access_aa32_tid3, 7144 .resetvalue = cpu->isar.id_isar1 }, 7145 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH, 7146 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 7147 .access = PL1_R, .type = ARM_CP_CONST, 7148 .accessfn = access_aa32_tid3, 7149 .resetvalue = cpu->isar.id_isar2 }, 7150 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH, 7151 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3, 7152 .access = PL1_R, .type = ARM_CP_CONST, 7153 .accessfn = access_aa32_tid3, 7154 .resetvalue = cpu->isar.id_isar3 }, 7155 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH, 7156 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4, 7157 .access = PL1_R, .type = ARM_CP_CONST, 7158 .accessfn = access_aa32_tid3, 7159 .resetvalue = cpu->isar.id_isar4 }, 7160 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH, 7161 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5, 7162 .access = PL1_R, .type = ARM_CP_CONST, 7163 .accessfn = access_aa32_tid3, 7164 .resetvalue = cpu->isar.id_isar5 }, 7165 { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH, 7166 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6, 7167 .access = PL1_R, .type = ARM_CP_CONST, 7168 .accessfn = access_aa32_tid3, 7169 .resetvalue = cpu->isar.id_mmfr4 }, 7170 { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH, 7171 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7, 7172 .access = PL1_R, .type = ARM_CP_CONST, 7173 .accessfn = access_aa32_tid3, 7174 .resetvalue = cpu->isar.id_isar6 }, 7175 REGINFO_SENTINEL 7176 }; 
7177 define_arm_cp_regs(cpu, v6_idregs); 7178 define_arm_cp_regs(cpu, v6_cp_reginfo); 7179 } else { 7180 define_arm_cp_regs(cpu, not_v6_cp_reginfo); 7181 } 7182 if (arm_feature(env, ARM_FEATURE_V6K)) { 7183 define_arm_cp_regs(cpu, v6k_cp_reginfo); 7184 } 7185 if (arm_feature(env, ARM_FEATURE_V7MP) && 7186 !arm_feature(env, ARM_FEATURE_PMSA)) { 7187 define_arm_cp_regs(cpu, v7mp_cp_reginfo); 7188 } 7189 if (arm_feature(env, ARM_FEATURE_V7VE)) { 7190 define_arm_cp_regs(cpu, pmovsset_cp_reginfo); 7191 } 7192 if (arm_feature(env, ARM_FEATURE_V7)) { 7193 ARMCPRegInfo clidr = { 7194 .name = "CLIDR", .state = ARM_CP_STATE_BOTH, 7195 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1, 7196 .access = PL1_R, .type = ARM_CP_CONST, 7197 .accessfn = access_aa64_tid2, 7198 .resetvalue = cpu->clidr 7199 }; 7200 define_one_arm_cp_reg(cpu, &clidr); 7201 define_arm_cp_regs(cpu, v7_cp_reginfo); 7202 define_debug_regs(cpu); 7203 define_pmu_regs(cpu); 7204 } else { 7205 define_arm_cp_regs(cpu, not_v7_cp_reginfo); 7206 } 7207 if (arm_feature(env, ARM_FEATURE_V8)) { 7208 /* AArch64 ID registers, which all have impdef reset values. 7209 * Note that within the ID register ranges the unused slots 7210 * must all RAZ, not UNDEF; future architecture versions may 7211 * define new registers here. 7212 */ 7213 ARMCPRegInfo v8_idregs[] = { 7214 /* 7215 * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system 7216 * emulation because we don't know the right value for the 7217 * GIC field until after we define these regs. 7218 */ 7219 { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64, 7220 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0, 7221 .access = PL1_R, 7222 #ifdef CONFIG_USER_ONLY 7223 .type = ARM_CP_CONST, 7224 .resetvalue = cpu->isar.id_aa64pfr0 7225 #else 7226 .type = ARM_CP_NO_RAW, 7227 .accessfn = access_aa64_tid3, 7228 .readfn = id_aa64pfr0_read, 7229 .writefn = arm_cp_write_ignore 7230 #endif 7231 }, 7232 { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64, 7233 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1, 7234 .access = PL1_R, .type = ARM_CP_CONST, 7235 .accessfn = access_aa64_tid3, 7236 .resetvalue = cpu->isar.id_aa64pfr1}, 7237 { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7238 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2, 7239 .access = PL1_R, .type = ARM_CP_CONST, 7240 .accessfn = access_aa64_tid3, 7241 .resetvalue = 0 }, 7242 { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7243 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3, 7244 .access = PL1_R, .type = ARM_CP_CONST, 7245 .accessfn = access_aa64_tid3, 7246 .resetvalue = 0 }, 7247 { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64, 7248 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4, 7249 .access = PL1_R, .type = ARM_CP_CONST, 7250 .accessfn = access_aa64_tid3, 7251 /* At present, only SVEver == 0 is defined anyway. 
*/ 7252 .resetvalue = 0 }, 7253 { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7254 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5, 7255 .access = PL1_R, .type = ARM_CP_CONST, 7256 .accessfn = access_aa64_tid3, 7257 .resetvalue = 0 }, 7258 { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7259 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6, 7260 .access = PL1_R, .type = ARM_CP_CONST, 7261 .accessfn = access_aa64_tid3, 7262 .resetvalue = 0 }, 7263 { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7264 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7, 7265 .access = PL1_R, .type = ARM_CP_CONST, 7266 .accessfn = access_aa64_tid3, 7267 .resetvalue = 0 }, 7268 { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64, 7269 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0, 7270 .access = PL1_R, .type = ARM_CP_CONST, 7271 .accessfn = access_aa64_tid3, 7272 .resetvalue = cpu->isar.id_aa64dfr0 }, 7273 { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64, 7274 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1, 7275 .access = PL1_R, .type = ARM_CP_CONST, 7276 .accessfn = access_aa64_tid3, 7277 .resetvalue = cpu->isar.id_aa64dfr1 }, 7278 { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7279 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2, 7280 .access = PL1_R, .type = ARM_CP_CONST, 7281 .accessfn = access_aa64_tid3, 7282 .resetvalue = 0 }, 7283 { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7284 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3, 7285 .access = PL1_R, .type = ARM_CP_CONST, 7286 .accessfn = access_aa64_tid3, 7287 .resetvalue = 0 }, 7288 { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64, 7289 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4, 7290 .access = PL1_R, .type = ARM_CP_CONST, 7291 .accessfn = access_aa64_tid3, 7292 .resetvalue = cpu->id_aa64afr0 }, 7293 { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64, 7294 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5, 7295 .access = PL1_R, .type = ARM_CP_CONST, 7296 .accessfn = access_aa64_tid3, 7297 .resetvalue = cpu->id_aa64afr1 }, 7298 { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7299 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6, 7300 .access = PL1_R, .type = ARM_CP_CONST, 7301 .accessfn = access_aa64_tid3, 7302 .resetvalue = 0 }, 7303 { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7304 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7, 7305 .access = PL1_R, .type = ARM_CP_CONST, 7306 .accessfn = access_aa64_tid3, 7307 .resetvalue = 0 }, 7308 { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64, 7309 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0, 7310 .access = PL1_R, .type = ARM_CP_CONST, 7311 .accessfn = access_aa64_tid3, 7312 .resetvalue = cpu->isar.id_aa64isar0 }, 7313 { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64, 7314 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1, 7315 .access = PL1_R, .type = ARM_CP_CONST, 7316 .accessfn = access_aa64_tid3, 7317 .resetvalue = cpu->isar.id_aa64isar1 }, 7318 { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7319 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2, 7320 .access = PL1_R, .type = ARM_CP_CONST, 7321 .accessfn = access_aa64_tid3, 7322 .resetvalue = 0 }, 7323 { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7324 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3, 7325 .access = PL1_R, .type = ARM_CP_CONST, 7326 .accessfn = access_aa64_tid3, 7327 
.resetvalue = 0 }, 7328 { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7329 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4, 7330 .access = PL1_R, .type = ARM_CP_CONST, 7331 .accessfn = access_aa64_tid3, 7332 .resetvalue = 0 }, 7333 { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7334 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5, 7335 .access = PL1_R, .type = ARM_CP_CONST, 7336 .accessfn = access_aa64_tid3, 7337 .resetvalue = 0 }, 7338 { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7339 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6, 7340 .access = PL1_R, .type = ARM_CP_CONST, 7341 .accessfn = access_aa64_tid3, 7342 .resetvalue = 0 }, 7343 { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7344 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7, 7345 .access = PL1_R, .type = ARM_CP_CONST, 7346 .accessfn = access_aa64_tid3, 7347 .resetvalue = 0 }, 7348 { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64, 7349 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 7350 .access = PL1_R, .type = ARM_CP_CONST, 7351 .accessfn = access_aa64_tid3, 7352 .resetvalue = cpu->isar.id_aa64mmfr0 }, 7353 { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64, 7354 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1, 7355 .access = PL1_R, .type = ARM_CP_CONST, 7356 .accessfn = access_aa64_tid3, 7357 .resetvalue = cpu->isar.id_aa64mmfr1 }, 7358 { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64, 7359 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2, 7360 .access = PL1_R, .type = ARM_CP_CONST, 7361 .accessfn = access_aa64_tid3, 7362 .resetvalue = cpu->isar.id_aa64mmfr2 }, 7363 { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7364 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3, 7365 .access = PL1_R, .type = ARM_CP_CONST, 7366 .accessfn = access_aa64_tid3, 7367 .resetvalue = 0 }, 7368 { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7369 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4, 7370 .access = PL1_R, .type = ARM_CP_CONST, 7371 .accessfn = access_aa64_tid3, 7372 .resetvalue = 0 }, 7373 { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7374 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5, 7375 .access = PL1_R, .type = ARM_CP_CONST, 7376 .accessfn = access_aa64_tid3, 7377 .resetvalue = 0 }, 7378 { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7379 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6, 7380 .access = PL1_R, .type = ARM_CP_CONST, 7381 .accessfn = access_aa64_tid3, 7382 .resetvalue = 0 }, 7383 { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7384 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7, 7385 .access = PL1_R, .type = ARM_CP_CONST, 7386 .accessfn = access_aa64_tid3, 7387 .resetvalue = 0 }, 7388 { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64, 7389 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0, 7390 .access = PL1_R, .type = ARM_CP_CONST, 7391 .accessfn = access_aa64_tid3, 7392 .resetvalue = cpu->isar.mvfr0 }, 7393 { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64, 7394 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1, 7395 .access = PL1_R, .type = ARM_CP_CONST, 7396 .accessfn = access_aa64_tid3, 7397 .resetvalue = cpu->isar.mvfr1 }, 7398 { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64, 7399 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, 7400 .access = PL1_R, .type = ARM_CP_CONST, 7401 .accessfn = access_aa64_tid3, 7402 .resetvalue = cpu->isar.mvfr2 }, 7403 { 
.name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7404 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3, 7405 .access = PL1_R, .type = ARM_CP_CONST, 7406 .accessfn = access_aa64_tid3, 7407 .resetvalue = 0 }, 7408 { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7409 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4, 7410 .access = PL1_R, .type = ARM_CP_CONST, 7411 .accessfn = access_aa64_tid3, 7412 .resetvalue = 0 }, 7413 { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7414 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5, 7415 .access = PL1_R, .type = ARM_CP_CONST, 7416 .accessfn = access_aa64_tid3, 7417 .resetvalue = 0 }, 7418 { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7419 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6, 7420 .access = PL1_R, .type = ARM_CP_CONST, 7421 .accessfn = access_aa64_tid3, 7422 .resetvalue = 0 }, 7423 { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7424 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7, 7425 .access = PL1_R, .type = ARM_CP_CONST, 7426 .accessfn = access_aa64_tid3, 7427 .resetvalue = 0 }, 7428 { .name = "PMCEID0", .state = ARM_CP_STATE_AA32, 7429 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6, 7430 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 7431 .resetvalue = extract64(cpu->pmceid0, 0, 32) }, 7432 { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64, 7433 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6, 7434 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 7435 .resetvalue = cpu->pmceid0 }, 7436 { .name = "PMCEID1", .state = ARM_CP_STATE_AA32, 7437 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7, 7438 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 7439 .resetvalue = extract64(cpu->pmceid1, 0, 32) }, 7440 { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64, 7441 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7, 7442 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 7443 .resetvalue = cpu->pmceid1 }, 7444 REGINFO_SENTINEL 7445 }; 7446 #ifdef CONFIG_USER_ONLY 7447 ARMCPRegUserSpaceInfo v8_user_idregs[] = { 7448 { .name = "ID_AA64PFR0_EL1", 7449 .exported_bits = 0x000f000f00ff0000, 7450 .fixed_bits = 0x0000000000000011 }, 7451 { .name = "ID_AA64PFR1_EL1", 7452 .exported_bits = 0x00000000000000f0 }, 7453 { .name = "ID_AA64PFR*_EL1_RESERVED", 7454 .is_glob = true }, 7455 { .name = "ID_AA64ZFR0_EL1" }, 7456 { .name = "ID_AA64MMFR0_EL1", 7457 .fixed_bits = 0x00000000ff000000 }, 7458 { .name = "ID_AA64MMFR1_EL1" }, 7459 { .name = "ID_AA64MMFR*_EL1_RESERVED", 7460 .is_glob = true }, 7461 { .name = "ID_AA64DFR0_EL1", 7462 .fixed_bits = 0x0000000000000006 }, 7463 { .name = "ID_AA64DFR1_EL1" }, 7464 { .name = "ID_AA64DFR*_EL1_RESERVED", 7465 .is_glob = true }, 7466 { .name = "ID_AA64AFR*", 7467 .is_glob = true }, 7468 { .name = "ID_AA64ISAR0_EL1", 7469 .exported_bits = 0x00fffffff0fffff0 }, 7470 { .name = "ID_AA64ISAR1_EL1", 7471 .exported_bits = 0x000000f0ffffffff }, 7472 { .name = "ID_AA64ISAR*_EL1_RESERVED", 7473 .is_glob = true }, 7474 REGUSERINFO_SENTINEL 7475 }; 7476 modify_arm_cp_regs(v8_idregs, v8_user_idregs); 7477 #endif 7478 /* RVBAR_EL1 is only implemented if EL1 is the highest EL */ 7479 if (!arm_feature(env, ARM_FEATURE_EL3) && 7480 !arm_feature(env, ARM_FEATURE_EL2)) { 7481 ARMCPRegInfo rvbar = { 7482 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64, 7483 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 7484 .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar 7485 }; 
7486 define_one_arm_cp_reg(cpu, &rvbar); 7487 } 7488 define_arm_cp_regs(cpu, v8_idregs); 7489 define_arm_cp_regs(cpu, v8_cp_reginfo); 7490 } 7491 if (arm_feature(env, ARM_FEATURE_EL2)) { 7492 uint64_t vmpidr_def = mpidr_read_val(env); 7493 ARMCPRegInfo vpidr_regs[] = { 7494 { .name = "VPIDR", .state = ARM_CP_STATE_AA32, 7495 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 7496 .access = PL2_RW, .accessfn = access_el3_aa32ns, 7497 .resetvalue = cpu->midr, .type = ARM_CP_ALIAS, 7498 .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) }, 7499 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64, 7500 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 7501 .access = PL2_RW, .resetvalue = cpu->midr, 7502 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, 7503 { .name = "VMPIDR", .state = ARM_CP_STATE_AA32, 7504 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 7505 .access = PL2_RW, .accessfn = access_el3_aa32ns, 7506 .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS, 7507 .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) }, 7508 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64, 7509 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 7510 .access = PL2_RW, 7511 .resetvalue = vmpidr_def, 7512 .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) }, 7513 REGINFO_SENTINEL 7514 }; 7515 define_arm_cp_regs(cpu, vpidr_regs); 7516 define_arm_cp_regs(cpu, el2_cp_reginfo); 7517 if (arm_feature(env, ARM_FEATURE_V8)) { 7518 define_arm_cp_regs(cpu, el2_v8_cp_reginfo); 7519 } 7520 /* RVBAR_EL2 is only implemented if EL2 is the highest EL */ 7521 if (!arm_feature(env, ARM_FEATURE_EL3)) { 7522 ARMCPRegInfo rvbar = { 7523 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64, 7524 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1, 7525 .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar 7526 }; 7527 define_one_arm_cp_reg(cpu, &rvbar); 7528 } 7529 } else { 7530 /* If EL2 is missing but higher ELs are enabled, we need to 7531 * register the no_el2 reginfos. 7532 */ 7533 if (arm_feature(env, ARM_FEATURE_EL3)) { 7534 /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value 7535 * of MIDR_EL1 and MPIDR_EL1. 
             */
            ARMCPRegInfo vpidr_regs[] = {
                { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_CONST, .resetvalue = cpu->midr,
                  .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
                { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_NO_RAW,
                  .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, vpidr_regs);
            define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
            if (arm_feature(env, ARM_FEATURE_V8)) {
                define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
            }
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        define_arm_cp_regs(cpu, el3_cp_reginfo);
        ARMCPRegInfo el3_regs[] = {
            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
              .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
              .access = PL3_RW,
              .raw_writefn = raw_write, .writefn = sctlr_write,
              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
              .resetvalue = cpu->reset_sctlr },
            REGINFO_SENTINEL
        };

        define_arm_cp_regs(cpu, el3_regs);
    }
    /* The behaviour of NSACR is sufficiently various that we don't
     * try to describe it in a single reginfo:
     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
     *     reads as constant 0xc00 from NS EL1 and NS EL2
     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
     *  if v7 without EL3, register doesn't exist
     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
     */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_RW, .accessfn = nsacr_access,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        } else {
            ARMCPRegInfo nsacr = {
                .name = "NSACR",
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL3_RW | PL1_R,
                .resetvalue = 0,
                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    } else {
        if (arm_feature(env, ARM_FEATURE_V8)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_R,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* PMSAv6 not implemented */
            assert(arm_feature(env, ARM_FEATURE_V7));
            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        }
    } else {
        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
        /* TTBCR2 is introduced with ARMv8.2-AA32HPD.
*/ 7626 if (cpu_isar_feature(aa32_hpd, cpu)) { 7627 define_one_arm_cp_reg(cpu, &ttbcr2_reginfo); 7628 } 7629 } 7630 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) { 7631 define_arm_cp_regs(cpu, t2ee_cp_reginfo); 7632 } 7633 if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { 7634 define_arm_cp_regs(cpu, generic_timer_cp_reginfo); 7635 } 7636 if (arm_feature(env, ARM_FEATURE_VAPA)) { 7637 define_arm_cp_regs(cpu, vapa_cp_reginfo); 7638 } 7639 if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) { 7640 define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo); 7641 } 7642 if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) { 7643 define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo); 7644 } 7645 if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) { 7646 define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo); 7647 } 7648 if (arm_feature(env, ARM_FEATURE_OMAPCP)) { 7649 define_arm_cp_regs(cpu, omap_cp_reginfo); 7650 } 7651 if (arm_feature(env, ARM_FEATURE_STRONGARM)) { 7652 define_arm_cp_regs(cpu, strongarm_cp_reginfo); 7653 } 7654 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 7655 define_arm_cp_regs(cpu, xscale_cp_reginfo); 7656 } 7657 if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) { 7658 define_arm_cp_regs(cpu, dummy_c15_cp_reginfo); 7659 } 7660 if (arm_feature(env, ARM_FEATURE_LPAE)) { 7661 define_arm_cp_regs(cpu, lpae_cp_reginfo); 7662 } 7663 if (cpu_isar_feature(aa32_jazelle, cpu)) { 7664 define_arm_cp_regs(cpu, jazelle_regs); 7665 } 7666 /* Slightly awkwardly, the OMAP and StrongARM cores need all of 7667 * cp15 crn=0 to be writes-ignored, whereas for other cores they should 7668 * be read-only (ie write causes UNDEF exception). 7669 */ 7670 { 7671 ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = { 7672 /* Pre-v8 MIDR space. 7673 * Note that the MIDR isn't a simple constant register because 7674 * of the TI925 behaviour where writes to another register can 7675 * cause the MIDR value to change. 7676 * 7677 * Unimplemented registers in the c15 0 0 0 space default to 7678 * MIDR. Define MIDR first as this entire space, then CTR, TCMTR 7679 * and friends override accordingly. 7680 */ 7681 { .name = "MIDR", 7682 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY, 7683 .access = PL1_R, .resetvalue = cpu->midr, 7684 .writefn = arm_cp_write_ignore, .raw_writefn = raw_write, 7685 .readfn = midr_read, 7686 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 7687 .type = ARM_CP_OVERRIDE }, 7688 /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. 
*/ 7689 { .name = "DUMMY", 7690 .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY, 7691 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 7692 { .name = "DUMMY", 7693 .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY, 7694 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 7695 { .name = "DUMMY", 7696 .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY, 7697 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 7698 { .name = "DUMMY", 7699 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY, 7700 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 7701 { .name = "DUMMY", 7702 .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY, 7703 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 7704 REGINFO_SENTINEL 7705 }; 7706 ARMCPRegInfo id_v8_midr_cp_reginfo[] = { 7707 { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH, 7708 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0, 7709 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr, 7710 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 7711 .readfn = midr_read }, 7712 /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */ 7713 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 7714 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 7715 .access = PL1_R, .resetvalue = cpu->midr }, 7716 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 7717 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7, 7718 .access = PL1_R, .resetvalue = cpu->midr }, 7719 { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH, 7720 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6, 7721 .access = PL1_R, 7722 .accessfn = access_aa64_tid1, 7723 .type = ARM_CP_CONST, .resetvalue = cpu->revidr }, 7724 REGINFO_SENTINEL 7725 }; 7726 ARMCPRegInfo id_cp_reginfo[] = { 7727 /* These are common to v8 and pre-v8 */ 7728 { .name = "CTR", 7729 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1, 7730 .access = PL1_R, .accessfn = ctr_el0_access, 7731 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 7732 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64, 7733 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0, 7734 .access = PL0_R, .accessfn = ctr_el0_access, 7735 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 7736 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */ 7737 { .name = "TCMTR", 7738 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2, 7739 .access = PL1_R, 7740 .accessfn = access_aa32_tid1, 7741 .type = ARM_CP_CONST, .resetvalue = 0 }, 7742 REGINFO_SENTINEL 7743 }; 7744 /* TLBTR is specific to VMSA */ 7745 ARMCPRegInfo id_tlbtr_reginfo = { 7746 .name = "TLBTR", 7747 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3, 7748 .access = PL1_R, 7749 .accessfn = access_aa32_tid1, 7750 .type = ARM_CP_CONST, .resetvalue = 0, 7751 }; 7752 /* MPUIR is specific to PMSA V6+ */ 7753 ARMCPRegInfo id_mpuir_reginfo = { 7754 .name = "MPUIR", 7755 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 7756 .access = PL1_R, .type = ARM_CP_CONST, 7757 .resetvalue = cpu->pmsav7_dregion << 8 7758 }; 7759 ARMCPRegInfo crn0_wi_reginfo = { 7760 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY, 7761 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W, 7762 .type = ARM_CP_NOP | ARM_CP_OVERRIDE 7763 }; 7764 #ifdef CONFIG_USER_ONLY 7765 ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = { 7766 { .name = "MIDR_EL1", 7767 .exported_bits = 0x00000000ffffffff }, 7768 { .name = "REVIDR_EL1" }, 7769 REGUSERINFO_SENTINEL 7770 }; 7771 modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo); 7772 #endif 7773 
if (arm_feature(env, ARM_FEATURE_OMAPCP) || 7774 arm_feature(env, ARM_FEATURE_STRONGARM)) { 7775 ARMCPRegInfo *r; 7776 /* Register the blanket "writes ignored" value first to cover the 7777 * whole space. Then update the specific ID registers to allow write 7778 * access, so that they ignore writes rather than causing them to 7779 * UNDEF. 7780 */ 7781 define_one_arm_cp_reg(cpu, &crn0_wi_reginfo); 7782 for (r = id_pre_v8_midr_cp_reginfo; 7783 r->type != ARM_CP_SENTINEL; r++) { 7784 r->access = PL1_RW; 7785 } 7786 for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) { 7787 r->access = PL1_RW; 7788 } 7789 id_mpuir_reginfo.access = PL1_RW; 7790 id_tlbtr_reginfo.access = PL1_RW; 7791 } 7792 if (arm_feature(env, ARM_FEATURE_V8)) { 7793 define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo); 7794 } else { 7795 define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo); 7796 } 7797 define_arm_cp_regs(cpu, id_cp_reginfo); 7798 if (!arm_feature(env, ARM_FEATURE_PMSA)) { 7799 define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo); 7800 } else if (arm_feature(env, ARM_FEATURE_V7)) { 7801 define_one_arm_cp_reg(cpu, &id_mpuir_reginfo); 7802 } 7803 } 7804 7805 if (arm_feature(env, ARM_FEATURE_MPIDR)) { 7806 ARMCPRegInfo mpidr_cp_reginfo[] = { 7807 { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH, 7808 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5, 7809 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW }, 7810 REGINFO_SENTINEL 7811 }; 7812 #ifdef CONFIG_USER_ONLY 7813 ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = { 7814 { .name = "MPIDR_EL1", 7815 .fixed_bits = 0x0000000080000000 }, 7816 REGUSERINFO_SENTINEL 7817 }; 7818 modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo); 7819 #endif 7820 define_arm_cp_regs(cpu, mpidr_cp_reginfo); 7821 } 7822 7823 if (arm_feature(env, ARM_FEATURE_AUXCR)) { 7824 ARMCPRegInfo auxcr_reginfo[] = { 7825 { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH, 7826 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1, 7827 .access = PL1_RW, .accessfn = access_tacr, 7828 .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr }, 7829 { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH, 7830 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1, 7831 .access = PL2_RW, .type = ARM_CP_CONST, 7832 .resetvalue = 0 }, 7833 { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64, 7834 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1, 7835 .access = PL3_RW, .type = ARM_CP_CONST, 7836 .resetvalue = 0 }, 7837 REGINFO_SENTINEL 7838 }; 7839 define_arm_cp_regs(cpu, auxcr_reginfo); 7840 if (cpu_isar_feature(aa32_ac2, cpu)) { 7841 define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo); 7842 } 7843 } 7844 7845 if (arm_feature(env, ARM_FEATURE_CBAR)) { 7846 /* 7847 * CBAR is IMPDEF, but common on Arm Cortex-A implementations. 7848 * There are two flavours: 7849 * (1) older 32-bit only cores have a simple 32-bit CBAR 7850 * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a 7851 * 32-bit register visible to AArch32 at a different encoding 7852 * to the "flavour 1" register and with the bits rearranged to 7853 * be able to squash a 64-bit address into the 32-bit view. 7854 * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but 7855 * in future if we support AArch32-only configs of some of the 7856 * AArch64 cores we might need to add a specific feature flag 7857 * to indicate cores with "flavour 2" CBAR. 7858 */ 7859 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 7860 /* 32 bit view is [31:18] 0...0 [43:32]. 
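             * For example (illustrative value only): a reset_cbar of
             * 0x00000abc40000000 yields a 32-bit CBAR value of 0x40000abc
             * from the extraction below.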
*/ 7861 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18) 7862 | extract64(cpu->reset_cbar, 32, 12); 7863 ARMCPRegInfo cbar_reginfo[] = { 7864 { .name = "CBAR", 7865 .type = ARM_CP_CONST, 7866 .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0, 7867 .access = PL1_R, .resetvalue = cbar32 }, 7868 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64, 7869 .type = ARM_CP_CONST, 7870 .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0, 7871 .access = PL1_R, .resetvalue = cpu->reset_cbar }, 7872 REGINFO_SENTINEL 7873 }; 7874 /* We don't implement a r/w 64 bit CBAR currently */ 7875 assert(arm_feature(env, ARM_FEATURE_CBAR_RO)); 7876 define_arm_cp_regs(cpu, cbar_reginfo); 7877 } else { 7878 ARMCPRegInfo cbar = { 7879 .name = "CBAR", 7880 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, 7881 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar, 7882 .fieldoffset = offsetof(CPUARMState, 7883 cp15.c15_config_base_address) 7884 }; 7885 if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { 7886 cbar.access = PL1_R; 7887 cbar.fieldoffset = 0; 7888 cbar.type = ARM_CP_CONST; 7889 } 7890 define_one_arm_cp_reg(cpu, &cbar); 7891 } 7892 } 7893 7894 if (arm_feature(env, ARM_FEATURE_VBAR)) { 7895 ARMCPRegInfo vbar_cp_reginfo[] = { 7896 { .name = "VBAR", .state = ARM_CP_STATE_BOTH, 7897 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0, 7898 .access = PL1_RW, .writefn = vbar_write, 7899 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s), 7900 offsetof(CPUARMState, cp15.vbar_ns) }, 7901 .resetvalue = 0 }, 7902 REGINFO_SENTINEL 7903 }; 7904 define_arm_cp_regs(cpu, vbar_cp_reginfo); 7905 } 7906 7907 /* Generic registers whose values depend on the implementation */ 7908 { 7909 ARMCPRegInfo sctlr = { 7910 .name = "SCTLR", .state = ARM_CP_STATE_BOTH, 7911 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 7912 .access = PL1_RW, .accessfn = access_tvm_trvm, 7913 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s), 7914 offsetof(CPUARMState, cp15.sctlr_ns) }, 7915 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr, 7916 .raw_writefn = raw_write, 7917 }; 7918 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 7919 /* Normally we would always end the TB on an SCTLR write, but Linux 7920 * arch/arm/mach-pxa/sleep.S expects two instructions following 7921 * an MMU enable to execute from cache. Imitate this behaviour. 
7922 */ 7923 sctlr.type |= ARM_CP_SUPPRESS_TB_END; 7924 } 7925 define_one_arm_cp_reg(cpu, &sctlr); 7926 } 7927 7928 if (cpu_isar_feature(aa64_lor, cpu)) { 7929 define_arm_cp_regs(cpu, lor_reginfo); 7930 } 7931 if (cpu_isar_feature(aa64_pan, cpu)) { 7932 define_one_arm_cp_reg(cpu, &pan_reginfo); 7933 } 7934 #ifndef CONFIG_USER_ONLY 7935 if (cpu_isar_feature(aa64_ats1e1, cpu)) { 7936 define_arm_cp_regs(cpu, ats1e1_reginfo); 7937 } 7938 if (cpu_isar_feature(aa32_ats1e1, cpu)) { 7939 define_arm_cp_regs(cpu, ats1cp_reginfo); 7940 } 7941 #endif 7942 if (cpu_isar_feature(aa64_uao, cpu)) { 7943 define_one_arm_cp_reg(cpu, &uao_reginfo); 7944 } 7945 7946 if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) { 7947 define_arm_cp_regs(cpu, vhe_reginfo); 7948 } 7949 7950 if (cpu_isar_feature(aa64_sve, cpu)) { 7951 define_one_arm_cp_reg(cpu, &zcr_el1_reginfo); 7952 if (arm_feature(env, ARM_FEATURE_EL2)) { 7953 define_one_arm_cp_reg(cpu, &zcr_el2_reginfo); 7954 } else { 7955 define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo); 7956 } 7957 if (arm_feature(env, ARM_FEATURE_EL3)) { 7958 define_one_arm_cp_reg(cpu, &zcr_el3_reginfo); 7959 } 7960 } 7961 7962 #ifdef TARGET_AARCH64 7963 if (cpu_isar_feature(aa64_pauth, cpu)) { 7964 define_arm_cp_regs(cpu, pauth_reginfo); 7965 } 7966 if (cpu_isar_feature(aa64_rndr, cpu)) { 7967 define_arm_cp_regs(cpu, rndr_reginfo); 7968 } 7969 #ifndef CONFIG_USER_ONLY 7970 /* Data Cache clean instructions up to PoP */ 7971 if (cpu_isar_feature(aa64_dcpop, cpu)) { 7972 define_one_arm_cp_reg(cpu, dcpop_reg); 7973 7974 if (cpu_isar_feature(aa64_dcpodp, cpu)) { 7975 define_one_arm_cp_reg(cpu, dcpodp_reg); 7976 } 7977 } 7978 #endif /*CONFIG_USER_ONLY*/ 7979 #endif 7980 7981 if (cpu_isar_feature(any_predinv, cpu)) { 7982 define_arm_cp_regs(cpu, predinv_reginfo); 7983 } 7984 7985 if (cpu_isar_feature(any_ccidx, cpu)) { 7986 define_arm_cp_regs(cpu, ccsidr2_reginfo); 7987 } 7988 7989 #ifndef CONFIG_USER_ONLY 7990 /* 7991 * Register redirections and aliases must be done last, 7992 * after the registers from the other extensions have been defined. 7993 */ 7994 if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) { 7995 define_arm_vh_e2h_redirects_aliases(cpu); 7996 } 7997 #endif 7998 } 7999 8000 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) 8001 { 8002 CPUState *cs = CPU(cpu); 8003 CPUARMState *env = &cpu->env; 8004 8005 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 8006 /* 8007 * The lower part of each SVE register aliases to the FPU 8008 * registers so we don't need to include both. 
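* (So an SVE-capable CPU registers only the dynamically generated SVE description with gdb, and everything else falls back to the fixed aarch64-fpu.xml.)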
8009 */ 8010 #ifdef TARGET_AARCH64 8011 if (isar_feature_aa64_sve(&cpu->isar)) { 8012 gdb_register_coprocessor(cs, arm_gdb_get_svereg, arm_gdb_set_svereg, 8013 arm_gen_dynamic_svereg_xml(cs, cs->gdb_num_regs), 8014 "sve-registers.xml", 0); 8015 } else 8016 #endif 8017 { 8018 gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg, 8019 aarch64_fpu_gdb_set_reg, 8020 34, "aarch64-fpu.xml", 0); 8021 } 8022 } else if (arm_feature(env, ARM_FEATURE_NEON)) { 8023 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 8024 51, "arm-neon.xml", 0); 8025 } else if (cpu_isar_feature(aa32_simd_r32, cpu)) { 8026 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 8027 35, "arm-vfp3.xml", 0); 8028 } else if (cpu_isar_feature(aa32_vfp_simd, cpu)) { 8029 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 8030 19, "arm-vfp.xml", 0); 8031 } 8032 gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg, 8033 arm_gen_dynamic_sysreg_xml(cs, cs->gdb_num_regs), 8034 "system-registers.xml", 0); 8035 8036 } 8037 8038 /* Sort alphabetically by type name, except for "any". */ 8039 static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b) 8040 { 8041 ObjectClass *class_a = (ObjectClass *)a; 8042 ObjectClass *class_b = (ObjectClass *)b; 8043 const char *name_a, *name_b; 8044 8045 name_a = object_class_get_name(class_a); 8046 name_b = object_class_get_name(class_b); 8047 if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) { 8048 return 1; 8049 } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) { 8050 return -1; 8051 } else { 8052 return strcmp(name_a, name_b); 8053 } 8054 } 8055 8056 static void arm_cpu_list_entry(gpointer data, gpointer user_data) 8057 { 8058 ObjectClass *oc = data; 8059 const char *typename; 8060 char *name; 8061 8062 typename = object_class_get_name(oc); 8063 name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU)); 8064 qemu_printf(" %s\n", name); 8065 g_free(name); 8066 } 8067 8068 void arm_cpu_list(void) 8069 { 8070 GSList *list; 8071 8072 list = object_class_get_list(TYPE_ARM_CPU, false); 8073 list = g_slist_sort(list, arm_cpu_list_compare); 8074 qemu_printf("Available CPUs:\n"); 8075 g_slist_foreach(list, arm_cpu_list_entry, NULL); 8076 g_slist_free(list); 8077 } 8078 8079 static void arm_cpu_add_definition(gpointer data, gpointer user_data) 8080 { 8081 ObjectClass *oc = data; 8082 CpuDefinitionInfoList **cpu_list = user_data; 8083 CpuDefinitionInfoList *entry; 8084 CpuDefinitionInfo *info; 8085 const char *typename; 8086 8087 typename = object_class_get_name(oc); 8088 info = g_malloc0(sizeof(*info)); 8089 info->name = g_strndup(typename, 8090 strlen(typename) - strlen("-" TYPE_ARM_CPU)); 8091 info->q_typename = g_strdup(typename); 8092 8093 entry = g_malloc0(sizeof(*entry)); 8094 entry->value = info; 8095 entry->next = *cpu_list; 8096 *cpu_list = entry; 8097 } 8098 8099 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) 8100 { 8101 CpuDefinitionInfoList *cpu_list = NULL; 8102 GSList *list; 8103 8104 list = object_class_get_list(TYPE_ARM_CPU, false); 8105 g_slist_foreach(list, arm_cpu_add_definition, &cpu_list); 8106 g_slist_free(list); 8107 8108 return cpu_list; 8109 } 8110 8111 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, 8112 void *opaque, int state, int secstate, 8113 int crm, int opc1, int opc2, 8114 const char *name) 8115 { 8116 /* Private utility function for define_one_arm_cp_reg_with_opaque(): 8117 * add a single reginfo struct to the hash table. 
8118 */ 8119 uint32_t *key = g_new(uint32_t, 1); 8120 ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo)); 8121 int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0; 8122 int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0; 8123 8124 r2->name = g_strdup(name); 8125 /* Reset the secure state to the specific incoming state. This is 8126 * necessary as the register may have been defined with both states. 8127 */ 8128 r2->secure = secstate; 8129 8130 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { 8131 /* Register is banked (using both entries in array). 8132 * Overwriting fieldoffset as the array is only used to define 8133 * banked registers but later only fieldoffset is used. 8134 */ 8135 r2->fieldoffset = r->bank_fieldoffsets[ns]; 8136 } 8137 8138 if (state == ARM_CP_STATE_AA32) { 8139 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { 8140 /* If the register is banked then we don't need to migrate or 8141 * reset the 32-bit instance in certain cases: 8142 * 8143 * 1) If the register has both 32-bit and 64-bit instances then we 8144 * can count on the 64-bit instance taking care of the 8145 * non-secure bank. 8146 * 2) If ARMv8 is enabled then we can count on a 64-bit version 8147 * taking care of the secure bank. This requires that separate 8148 * 32 and 64-bit definitions are provided. 8149 */ 8150 if ((r->state == ARM_CP_STATE_BOTH && ns) || 8151 (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) { 8152 r2->type |= ARM_CP_ALIAS; 8153 } 8154 } else if ((secstate != r->secure) && !ns) { 8155 /* The register is not banked so we only want to allow migration of 8156 * the non-secure instance. 8157 */ 8158 r2->type |= ARM_CP_ALIAS; 8159 } 8160 8161 if (r->state == ARM_CP_STATE_BOTH) { 8162 /* We assume it is a cp15 register if the .cp field is left unset. 8163 */ 8164 if (r2->cp == 0) { 8165 r2->cp = 15; 8166 } 8167 8168 #ifdef HOST_WORDS_BIGENDIAN 8169 if (r2->fieldoffset) { 8170 r2->fieldoffset += sizeof(uint32_t); 8171 } 8172 #endif 8173 } 8174 } 8175 if (state == ARM_CP_STATE_AA64) { 8176 /* To allow abbreviation of ARMCPRegInfo 8177 * definitions, we treat cp == 0 as equivalent to 8178 * the value for "standard guest-visible sysreg". 8179 * STATE_BOTH definitions are also always "standard 8180 * sysreg" in their AArch64 view (the .cp value may 8181 * be non-zero for the benefit of the AArch32 view). 8182 */ 8183 if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) { 8184 r2->cp = CP_REG_ARM64_SYSREG_CP; 8185 } 8186 *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm, 8187 r2->opc0, opc1, opc2); 8188 } else { 8189 *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2); 8190 } 8191 if (opaque) { 8192 r2->opaque = opaque; 8193 } 8194 /* reginfo passed to helpers is correct for the actual access, 8195 * and is never ARM_CP_STATE_BOTH: 8196 */ 8197 r2->state = state; 8198 /* Make sure reginfo passed to helpers for wildcarded regs 8199 * has the correct crm/opc1/opc2 for this reg, not CP_ANY: 8200 */ 8201 r2->crm = crm; 8202 r2->opc1 = opc1; 8203 r2->opc2 = opc2; 8204 /* By convention, for wildcarded registers only the first 8205 * entry is used for migration; the others are marked as 8206 * ALIAS so we don't try to transfer the register 8207 * multiple times. Special registers (ie NOP/WFI) are 8208 * never migratable and not even raw-accessible. 
8209 */ 8210 if ((r->type & ARM_CP_SPECIAL)) { 8211 r2->type |= ARM_CP_NO_RAW; 8212 } 8213 if (((r->crm == CP_ANY) && crm != 0) || 8214 ((r->opc1 == CP_ANY) && opc1 != 0) || 8215 ((r->opc2 == CP_ANY) && opc2 != 0)) { 8216 r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB; 8217 } 8218 8219 /* Check that raw accesses are either forbidden or handled. Note that 8220 * we can't assert this earlier because the setup of fieldoffset for 8221 * banked registers has to be done first. 8222 */ 8223 if (!(r2->type & ARM_CP_NO_RAW)) { 8224 assert(!raw_accessors_invalid(r2)); 8225 } 8226 8227 /* Overriding of an existing definition must be explicitly 8228 * requested. 8229 */ 8230 if (!(r->type & ARM_CP_OVERRIDE)) { 8231 ARMCPRegInfo *oldreg; 8232 oldreg = g_hash_table_lookup(cpu->cp_regs, key); 8233 if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) { 8234 fprintf(stderr, "Register redefined: cp=%d %d bit " 8235 "crn=%d crm=%d opc1=%d opc2=%d, " 8236 "was %s, now %s\n", r2->cp, 32 + 32 * is64, 8237 r2->crn, r2->crm, r2->opc1, r2->opc2, 8238 oldreg->name, r2->name); 8239 g_assert_not_reached(); 8240 } 8241 } 8242 g_hash_table_insert(cpu->cp_regs, key, r2); 8243 } 8244 8245 8246 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, 8247 const ARMCPRegInfo *r, void *opaque) 8248 { 8249 /* Define implementations of coprocessor registers. 8250 * We store these in a hashtable because typically 8251 * there are less than 150 registers in a space which 8252 * is 16*16*16*8*8 = 262144 in size. 8253 * Wildcarding is supported for the crm, opc1 and opc2 fields. 8254 * If a register is defined twice then the second definition is 8255 * used, so this can be used to define some generic registers and 8256 * then override them with implementation specific variations. 8257 * At least one of the original and the second definition should 8258 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard 8259 * against accidental use. 8260 * 8261 * The state field defines whether the register is to be 8262 * visible in the AArch32 or AArch64 execution state. If the 8263 * state is set to ARM_CP_STATE_BOTH then we synthesise a 8264 * reginfo structure for the AArch32 view, which sees the lower 8265 * 32 bits of the 64 bit register. 8266 * 8267 * Only registers visible in AArch64 may set r->opc0; opc0 cannot 8268 * be wildcarded. AArch64 registers are always considered to be 64 8269 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of 8270 * the register, if any. 8271 */ 8272 int crm, opc1, opc2, state; 8273 int crmmin = (r->crm == CP_ANY) ? 0 : r->crm; 8274 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm; 8275 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1; 8276 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1; 8277 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2; 8278 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2; 8279 /* 64 bit registers have only CRm and Opc1 fields */ 8280 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn))); 8281 /* op0 only exists in the AArch64 encodings */ 8282 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0)); 8283 /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */ 8284 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT)); 8285 /* The AArch64 pseudocode CheckSystemAccess() specifies that op1 8286 * encodes a minimum access level for the register. 
We roll this 8287 * runtime check into our general permission check code, so check 8288 * here that the reginfo's specified permissions are strict enough 8289 * to encompass the generic architectural permission check. 8290 */ 8291 if (r->state != ARM_CP_STATE_AA32) { 8292 int mask = 0; 8293 switch (r->opc1) { 8294 case 0: 8295 /* min_EL EL1, but some accessible to EL0 via kernel ABI */ 8296 mask = PL0U_R | PL1_RW; 8297 break; 8298 case 1: case 2: 8299 /* min_EL EL1 */ 8300 mask = PL1_RW; 8301 break; 8302 case 3: 8303 /* min_EL EL0 */ 8304 mask = PL0_RW; 8305 break; 8306 case 4: 8307 case 5: 8308 /* min_EL EL2 */ 8309 mask = PL2_RW; 8310 break; 8311 case 6: 8312 /* min_EL EL3 */ 8313 mask = PL3_RW; 8314 break; 8315 case 7: 8316 /* min_EL EL1, secure mode only (we don't check the latter) */ 8317 mask = PL1_RW; 8318 break; 8319 default: 8320 /* broken reginfo with out-of-range opc1 */ 8321 assert(false); 8322 break; 8323 } 8324 /* assert our permissions are not too lax (stricter is fine) */ 8325 assert((r->access & ~mask) == 0); 8326 } 8327 8328 /* Check that the register definition has enough info to handle 8329 * reads and writes if they are permitted. 8330 */ 8331 if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) { 8332 if (r->access & PL3_R) { 8333 assert((r->fieldoffset || 8334 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 8335 r->readfn); 8336 } 8337 if (r->access & PL3_W) { 8338 assert((r->fieldoffset || 8339 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 8340 r->writefn); 8341 } 8342 } 8343 /* Bad type field probably means missing sentinel at end of reg list */ 8344 assert(cptype_valid(r->type)); 8345 for (crm = crmmin; crm <= crmmax; crm++) { 8346 for (opc1 = opc1min; opc1 <= opc1max; opc1++) { 8347 for (opc2 = opc2min; opc2 <= opc2max; opc2++) { 8348 for (state = ARM_CP_STATE_AA32; 8349 state <= ARM_CP_STATE_AA64; state++) { 8350 if (r->state != state && r->state != ARM_CP_STATE_BOTH) { 8351 continue; 8352 } 8353 if (state == ARM_CP_STATE_AA32) { 8354 /* Under AArch32 CP registers can be common 8355 * (same for secure and non-secure world) or banked. 8356 */ 8357 char *name; 8358 8359 switch (r->secure) { 8360 case ARM_CP_SECSTATE_S: 8361 case ARM_CP_SECSTATE_NS: 8362 add_cpreg_to_hashtable(cpu, r, opaque, state, 8363 r->secure, crm, opc1, opc2, 8364 r->name); 8365 break; 8366 default: 8367 name = g_strdup_printf("%s_S", r->name); 8368 add_cpreg_to_hashtable(cpu, r, opaque, state, 8369 ARM_CP_SECSTATE_S, 8370 crm, opc1, opc2, name); 8371 g_free(name); 8372 add_cpreg_to_hashtable(cpu, r, opaque, state, 8373 ARM_CP_SECSTATE_NS, 8374 crm, opc1, opc2, r->name); 8375 break; 8376 } 8377 } else { 8378 /* AArch64 registers get mapped to non-secure instance 8379 * of AArch32 */ 8380 add_cpreg_to_hashtable(cpu, r, opaque, state, 8381 ARM_CP_SECSTATE_NS, 8382 crm, opc1, opc2, r->name); 8383 } 8384 } 8385 } 8386 } 8387 } 8388 } 8389 8390 void define_arm_cp_regs_with_opaque(ARMCPU *cpu, 8391 const ARMCPRegInfo *regs, void *opaque) 8392 { 8393 /* Define a whole list of registers */ 8394 const ARMCPRegInfo *r; 8395 for (r = regs; r->type != ARM_CP_SENTINEL; r++) { 8396 define_one_arm_cp_reg_with_opaque(cpu, r, opaque); 8397 } 8398 } 8399 8400 /* 8401 * Modify ARMCPRegInfo for access from userspace. 8402 * 8403 * This is a data driven modification directed by 8404 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as 8405 * user-space cannot alter any values and dynamic values pertaining to 8406 * execution state are hidden from user space view anyway. 
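* Entries with .is_glob set are treated as glob patterns and force every matching register to read as constant zero, while exact-name entries can keep selected bits via .exported_bits and force bits on via .fixed_bits.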
8407 */ 8408 void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods) 8409 { 8410 const ARMCPRegUserSpaceInfo *m; 8411 ARMCPRegInfo *r; 8412 8413 for (m = mods; m->name; m++) { 8414 GPatternSpec *pat = NULL; 8415 if (m->is_glob) { 8416 pat = g_pattern_spec_new(m->name); 8417 } 8418 for (r = regs; r->type != ARM_CP_SENTINEL; r++) { 8419 if (pat && g_pattern_match_string(pat, r->name)) { 8420 r->type = ARM_CP_CONST; 8421 r->access = PL0U_R; 8422 r->resetvalue = 0; 8423 /* continue */ 8424 } else if (strcmp(r->name, m->name) == 0) { 8425 r->type = ARM_CP_CONST; 8426 r->access = PL0U_R; 8427 r->resetvalue &= m->exported_bits; 8428 r->resetvalue |= m->fixed_bits; 8429 break; 8430 } 8431 } 8432 if (pat) { 8433 g_pattern_spec_free(pat); 8434 } 8435 } 8436 } 8437 8438 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp) 8439 { 8440 return g_hash_table_lookup(cpregs, &encoded_cp); 8441 } 8442 8443 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri, 8444 uint64_t value) 8445 { 8446 /* Helper coprocessor write function for write-ignore registers */ 8447 } 8448 8449 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri) 8450 { 8451 /* Helper coprocessor write function for read-as-zero registers */ 8452 return 0; 8453 } 8454 8455 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque) 8456 { 8457 /* Helper coprocessor reset function for do-nothing-on-reset registers */ 8458 } 8459 8460 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type) 8461 { 8462 /* Return true if it is not valid for us to switch to 8463 * this CPU mode (ie all the UNPREDICTABLE cases in 8464 * the ARM ARM CPSRWriteByInstr pseudocode). 8465 */ 8466 8467 /* Changes to or from Hyp via MSR and CPS are illegal. */ 8468 if (write_type == CPSRWriteByInstr && 8469 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP || 8470 mode == ARM_CPU_MODE_HYP)) { 8471 return 1; 8472 } 8473 8474 switch (mode) { 8475 case ARM_CPU_MODE_USR: 8476 return 0; 8477 case ARM_CPU_MODE_SYS: 8478 case ARM_CPU_MODE_SVC: 8479 case ARM_CPU_MODE_ABT: 8480 case ARM_CPU_MODE_UND: 8481 case ARM_CPU_MODE_IRQ: 8482 case ARM_CPU_MODE_FIQ: 8483 /* Note that we don't implement the IMPDEF NSACR.RFR which in v7 8484 * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.) 8485 */ 8486 /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR 8487 * and CPS are treated as illegal mode changes. 
8488 */ 8489 if (write_type == CPSRWriteByInstr && 8490 (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON && 8491 (arm_hcr_el2_eff(env) & HCR_TGE)) { 8492 return 1; 8493 } 8494 return 0; 8495 case ARM_CPU_MODE_HYP: 8496 return !arm_feature(env, ARM_FEATURE_EL2) 8497 || arm_current_el(env) < 2 || arm_is_secure_below_el3(env); 8498 case ARM_CPU_MODE_MON: 8499 return arm_current_el(env) < 3; 8500 default: 8501 return 1; 8502 } 8503 } 8504 8505 uint32_t cpsr_read(CPUARMState *env) 8506 { 8507 int ZF; 8508 ZF = (env->ZF == 0); 8509 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) | 8510 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) 8511 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25) 8512 | ((env->condexec_bits & 0xfc) << 8) 8513 | (env->GE << 16) | (env->daif & CPSR_AIF); 8514 } 8515 8516 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, 8517 CPSRWriteType write_type) 8518 { 8519 uint32_t changed_daif; 8520 8521 if (mask & CPSR_NZCV) { 8522 env->ZF = (~val) & CPSR_Z; 8523 env->NF = val; 8524 env->CF = (val >> 29) & 1; 8525 env->VF = (val << 3) & 0x80000000; 8526 } 8527 if (mask & CPSR_Q) 8528 env->QF = ((val & CPSR_Q) != 0); 8529 if (mask & CPSR_T) 8530 env->thumb = ((val & CPSR_T) != 0); 8531 if (mask & CPSR_IT_0_1) { 8532 env->condexec_bits &= ~3; 8533 env->condexec_bits |= (val >> 25) & 3; 8534 } 8535 if (mask & CPSR_IT_2_7) { 8536 env->condexec_bits &= 3; 8537 env->condexec_bits |= (val >> 8) & 0xfc; 8538 } 8539 if (mask & CPSR_GE) { 8540 env->GE = (val >> 16) & 0xf; 8541 } 8542 8543 /* In a V7 implementation that includes the security extensions but does 8544 * not include Virtualization Extensions the SCR.FW and SCR.AW bits control 8545 * whether non-secure software is allowed to change the CPSR_F and CPSR_A 8546 * bits respectively. 8547 * 8548 * In a V8 implementation, it is permitted for privileged software to 8549 * change the CPSR A/F bits regardless of the SCR.AW/FW bits. 8550 */ 8551 if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) && 8552 arm_feature(env, ARM_FEATURE_EL3) && 8553 !arm_feature(env, ARM_FEATURE_EL2) && 8554 !arm_is_secure(env)) { 8555 8556 changed_daif = (env->daif ^ val) & mask; 8557 8558 if (changed_daif & CPSR_A) { 8559 /* Check to see if we are allowed to change the masking of async 8560 * abort exceptions from a non-secure state. 8561 */ 8562 if (!(env->cp15.scr_el3 & SCR_AW)) { 8563 qemu_log_mask(LOG_GUEST_ERROR, 8564 "Ignoring attempt to switch CPSR_A flag from " 8565 "non-secure world with SCR.AW bit clear\n"); 8566 mask &= ~CPSR_A; 8567 } 8568 } 8569 8570 if (changed_daif & CPSR_F) { 8571 /* Check to see if we are allowed to change the masking of FIQ 8572 * exceptions from a non-secure state. 8573 */ 8574 if (!(env->cp15.scr_el3 & SCR_FW)) { 8575 qemu_log_mask(LOG_GUEST_ERROR, 8576 "Ignoring attempt to switch CPSR_F flag from " 8577 "non-secure world with SCR.FW bit clear\n"); 8578 mask &= ~CPSR_F; 8579 } 8580 8581 /* Check whether non-maskable FIQ (NMFI) support is enabled. 8582 * If this bit is set software is not allowed to mask 8583 * FIQs, but is allowed to set CPSR_F to 0. 
8584 */ 8585 if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) && 8586 (val & CPSR_F)) { 8587 qemu_log_mask(LOG_GUEST_ERROR, 8588 "Ignoring attempt to enable CPSR_F flag " 8589 "(non-maskable FIQ [NMFI] support enabled)\n"); 8590 mask &= ~CPSR_F; 8591 } 8592 } 8593 } 8594 8595 env->daif &= ~(CPSR_AIF & mask); 8596 env->daif |= val & CPSR_AIF & mask; 8597 8598 if (write_type != CPSRWriteRaw && 8599 ((env->uncached_cpsr ^ val) & mask & CPSR_M)) { 8600 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) { 8601 /* Note that we can only get here in USR mode if this is a 8602 * gdb stub write; for this case we follow the architectural 8603 * behaviour for guest writes in USR mode of ignoring an attempt 8604 * to switch mode. (Those are caught by translate.c for writes 8605 * triggered by guest instructions.) 8606 */ 8607 mask &= ~CPSR_M; 8608 } else if (bad_mode_switch(env, val & CPSR_M, write_type)) { 8609 /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in 8610 * v7, and has defined behaviour in v8: 8611 * + leave CPSR.M untouched 8612 * + allow changes to the other CPSR fields 8613 * + set PSTATE.IL 8614 * For user changes via the GDB stub, we don't set PSTATE.IL, 8615 * as this would be unnecessarily harsh for a user error. 8616 */ 8617 mask &= ~CPSR_M; 8618 if (write_type != CPSRWriteByGDBStub && 8619 arm_feature(env, ARM_FEATURE_V8)) { 8620 mask |= CPSR_IL; 8621 val |= CPSR_IL; 8622 } 8623 qemu_log_mask(LOG_GUEST_ERROR, 8624 "Illegal AArch32 mode switch attempt from %s to %s\n", 8625 aarch32_mode_name(env->uncached_cpsr), 8626 aarch32_mode_name(val)); 8627 } else { 8628 qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n", 8629 write_type == CPSRWriteExceptionReturn ? 8630 "Exception return from AArch32" : 8631 "AArch32 mode switch from", 8632 aarch32_mode_name(env->uncached_cpsr), 8633 aarch32_mode_name(val), env->regs[15]); 8634 switch_mode(env, val & CPSR_M); 8635 } 8636 } 8637 mask &= ~CACHED_CPSR_BITS; 8638 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); 8639 } 8640 8641 /* Sign/zero extend */ 8642 uint32_t HELPER(sxtb16)(uint32_t x) 8643 { 8644 uint32_t res; 8645 res = (uint16_t)(int8_t)x; 8646 res |= (uint32_t)(int8_t)(x >> 16) << 16; 8647 return res; 8648 } 8649 8650 uint32_t HELPER(uxtb16)(uint32_t x) 8651 { 8652 uint32_t res; 8653 res = (uint16_t)(uint8_t)x; 8654 res |= (uint32_t)(uint8_t)(x >> 16) << 16; 8655 return res; 8656 } 8657 8658 int32_t HELPER(sdiv)(int32_t num, int32_t den) 8659 { 8660 if (den == 0) 8661 return 0; 8662 if (num == INT_MIN && den == -1) 8663 return INT_MIN; 8664 return num / den; 8665 } 8666 8667 uint32_t HELPER(udiv)(uint32_t num, uint32_t den) 8668 { 8669 if (den == 0) 8670 return 0; 8671 return num / den; 8672 } 8673 8674 uint32_t HELPER(rbit)(uint32_t x) 8675 { 8676 return revbit32(x); 8677 } 8678 8679 #ifdef CONFIG_USER_ONLY 8680 8681 static void switch_mode(CPUARMState *env, int mode) 8682 { 8683 ARMCPU *cpu = env_archcpu(env); 8684 8685 if (mode != ARM_CPU_MODE_USR) { 8686 cpu_abort(CPU(cpu), "Tried to switch out of user mode\n"); 8687 } 8688 } 8689 8690 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, 8691 uint32_t cur_el, bool secure) 8692 { 8693 return 1; 8694 } 8695 8696 void aarch64_sync_64_to_32(CPUARMState *env) 8697 { 8698 g_assert_not_reached(); 8699 } 8700 8701 #else 8702 8703 static void switch_mode(CPUARMState *env, int mode) 8704 { 8705 int old_mode; 8706 int i; 8707 8708 old_mode = env->uncached_cpsr & CPSR_M; 8709 if (mode == old_mode) 8710 return; 8711 8712 if 
(old_mode == ARM_CPU_MODE_FIQ) { 8713 memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t)); 8714 memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t)); 8715 } else if (mode == ARM_CPU_MODE_FIQ) { 8716 memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t)); 8717 memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t)); 8718 } 8719 8720 i = bank_number(old_mode); 8721 env->banked_r13[i] = env->regs[13]; 8722 env->banked_spsr[i] = env->spsr; 8723 8724 i = bank_number(mode); 8725 env->regs[13] = env->banked_r13[i]; 8726 env->spsr = env->banked_spsr[i]; 8727 8728 env->banked_r14[r14_bank_number(old_mode)] = env->regs[14]; 8729 env->regs[14] = env->banked_r14[r14_bank_number(mode)]; 8730 } 8731 8732 /* Physical Interrupt Target EL Lookup Table 8733 * 8734 * [ From ARM ARM section G1.13.4 (Table G1-15) ] 8735 * 8736 * The below multi-dimensional table is used for looking up the target 8737 * exception level given numerous condition criteria. Specifically, the 8738 * target EL is based on SCR and HCR routing controls as well as the 8739 * currently executing EL and secure state. 8740 * 8741 * Dimensions: 8742 * target_el_table[2][2][2][2][2][4] 8743 * | | | | | +--- Current EL 8744 * | | | | +------ Non-secure(0)/Secure(1) 8745 * | | | +--------- HCR mask override 8746 * | | +------------ SCR exec state control 8747 * | +--------------- SCR mask override 8748 * +------------------ 32-bit(0)/64-bit(1) EL3 8749 * 8750 * The table values are as such: 8751 * 0-3 = EL0-EL3 8752 * -1 = Cannot occur 8753 * 8754 * The ARM ARM target EL table includes entries indicating that an "exception 8755 * is not taken". The two cases where this is applicable are: 8756 * 1) An exception is taken from EL3 but the SCR does not have the exception 8757 * routed to EL3. 8758 * 2) An exception is taken from EL2 but the HCR does not have the exception 8759 * routed to EL2. 8760 * In these two cases, the below table contains a target of EL1. This value is 8761 * returned as it is expected that the consumer of the table data will check 8762 * for "target EL >= current EL" to ensure the exception is not taken.
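* For example, a physical IRQ taken from non-secure EL0 with SCR.IRQ == 0 and HCR.{IMO,TGE} == 0 always looks up a target of EL1 in the table below.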
8763 * 8764 * SCR HCR 8765 * 64 EA AMO From 8766 * BIT IRQ IMO Non-secure Secure 8767 * EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3 8768 */ 8769 static const int8_t target_el_table[2][2][2][2][2][4] = { 8770 {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, 8771 {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},}, 8772 {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, 8773 {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},}, 8774 {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, 8775 {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},}, 8776 {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, 8777 {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},}, 8778 {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },}, 8779 {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},}, 8780 {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, -1, 1 },}, 8781 {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},}, 8782 {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, 8783 {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},}, 8784 {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, 8785 {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},},}, 8786 }; 8787 8788 /* 8789 * Determine the target EL for physical exceptions 8790 */ 8791 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, 8792 uint32_t cur_el, bool secure) 8793 { 8794 CPUARMState *env = cs->env_ptr; 8795 bool rw; 8796 bool scr; 8797 bool hcr; 8798 int target_el; 8799 /* Is the highest EL AArch64? */ 8800 bool is64 = arm_feature(env, ARM_FEATURE_AARCH64); 8801 uint64_t hcr_el2; 8802 8803 if (arm_feature(env, ARM_FEATURE_EL3)) { 8804 rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW); 8805 } else { 8806 /* Either EL2 is the highest EL (and so the EL2 register width 8807 * is given by is64); or there is no EL2 or EL3, in which case 8808 * the value of 'rw' does not affect the table lookup anyway. 8809 */ 8810 rw = is64; 8811 } 8812 8813 hcr_el2 = arm_hcr_el2_eff(env); 8814 switch (excp_idx) { 8815 case EXCP_IRQ: 8816 scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ); 8817 hcr = hcr_el2 & HCR_IMO; 8818 break; 8819 case EXCP_FIQ: 8820 scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ); 8821 hcr = hcr_el2 & HCR_FMO; 8822 break; 8823 default: 8824 scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA); 8825 hcr = hcr_el2 & HCR_AMO; 8826 break; 8827 }; 8828 8829 /* 8830 * For these purposes, TGE and AMO/IMO/FMO both force the 8831 * interrupt to EL2. Fold TGE into the bit extracted above. 
8832 */ 8833 hcr |= (hcr_el2 & HCR_TGE) != 0; 8834 8835 /* Perform a table-lookup for the target EL given the current state */ 8836 target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el]; 8837 8838 assert(target_el > 0); 8839 8840 return target_el; 8841 } 8842 8843 void arm_log_exception(int idx) 8844 { 8845 if (qemu_loglevel_mask(CPU_LOG_INT)) { 8846 const char *exc = NULL; 8847 static const char * const excnames[] = { 8848 [EXCP_UDEF] = "Undefined Instruction", 8849 [EXCP_SWI] = "SVC", 8850 [EXCP_PREFETCH_ABORT] = "Prefetch Abort", 8851 [EXCP_DATA_ABORT] = "Data Abort", 8852 [EXCP_IRQ] = "IRQ", 8853 [EXCP_FIQ] = "FIQ", 8854 [EXCP_BKPT] = "Breakpoint", 8855 [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit", 8856 [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage", 8857 [EXCP_HVC] = "Hypervisor Call", 8858 [EXCP_HYP_TRAP] = "Hypervisor Trap", 8859 [EXCP_SMC] = "Secure Monitor Call", 8860 [EXCP_VIRQ] = "Virtual IRQ", 8861 [EXCP_VFIQ] = "Virtual FIQ", 8862 [EXCP_SEMIHOST] = "Semihosting call", 8863 [EXCP_NOCP] = "v7M NOCP UsageFault", 8864 [EXCP_INVSTATE] = "v7M INVSTATE UsageFault", 8865 [EXCP_STKOF] = "v8M STKOF UsageFault", 8866 [EXCP_LAZYFP] = "v7M exception during lazy FP stacking", 8867 [EXCP_LSERR] = "v8M LSERR UsageFault", 8868 [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault", 8869 }; 8870 8871 if (idx >= 0 && idx < ARRAY_SIZE(excnames)) { 8872 exc = excnames[idx]; 8873 } 8874 if (!exc) { 8875 exc = "unknown"; 8876 } 8877 qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc); 8878 } 8879 } 8880 8881 /* 8882 * Function used to synchronize QEMU's AArch64 register set with AArch32 8883 * register set. This is necessary when switching between AArch32 and AArch64 8884 * execution state. 8885 */ 8886 void aarch64_sync_32_to_64(CPUARMState *env) 8887 { 8888 int i; 8889 uint32_t mode = env->uncached_cpsr & CPSR_M; 8890 8891 /* We can blanket copy R[0:7] to X[0:7] */ 8892 for (i = 0; i < 8; i++) { 8893 env->xregs[i] = env->regs[i]; 8894 } 8895 8896 /* 8897 * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12. 8898 * Otherwise, they come from the banked user regs. 8899 */ 8900 if (mode == ARM_CPU_MODE_FIQ) { 8901 for (i = 8; i < 13; i++) { 8902 env->xregs[i] = env->usr_regs[i - 8]; 8903 } 8904 } else { 8905 for (i = 8; i < 13; i++) { 8906 env->xregs[i] = env->regs[i]; 8907 } 8908 } 8909 8910 /* 8911 * Registers x13-x23 are the various mode SP and FP registers. Registers 8912 * r13 and r14 are only copied if we are in that mode, otherwise we copy 8913 * from the mode banked register. 
8914 */ 8915 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 8916 env->xregs[13] = env->regs[13]; 8917 env->xregs[14] = env->regs[14]; 8918 } else { 8919 env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)]; 8920 /* HYP is an exception in that it is copied from r14 */ 8921 if (mode == ARM_CPU_MODE_HYP) { 8922 env->xregs[14] = env->regs[14]; 8923 } else { 8924 env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)]; 8925 } 8926 } 8927 8928 if (mode == ARM_CPU_MODE_HYP) { 8929 env->xregs[15] = env->regs[13]; 8930 } else { 8931 env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)]; 8932 } 8933 8934 if (mode == ARM_CPU_MODE_IRQ) { 8935 env->xregs[16] = env->regs[14]; 8936 env->xregs[17] = env->regs[13]; 8937 } else { 8938 env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)]; 8939 env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)]; 8940 } 8941 8942 if (mode == ARM_CPU_MODE_SVC) { 8943 env->xregs[18] = env->regs[14]; 8944 env->xregs[19] = env->regs[13]; 8945 } else { 8946 env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)]; 8947 env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)]; 8948 } 8949 8950 if (mode == ARM_CPU_MODE_ABT) { 8951 env->xregs[20] = env->regs[14]; 8952 env->xregs[21] = env->regs[13]; 8953 } else { 8954 env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)]; 8955 env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)]; 8956 } 8957 8958 if (mode == ARM_CPU_MODE_UND) { 8959 env->xregs[22] = env->regs[14]; 8960 env->xregs[23] = env->regs[13]; 8961 } else { 8962 env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)]; 8963 env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)]; 8964 } 8965 8966 /* 8967 * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 8968 * mode, then we can copy from r8-r14. Otherwise, we copy from the 8969 * FIQ bank for r8-r14. 8970 */ 8971 if (mode == ARM_CPU_MODE_FIQ) { 8972 for (i = 24; i < 31; i++) { 8973 env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */ 8974 } 8975 } else { 8976 for (i = 24; i < 29; i++) { 8977 env->xregs[i] = env->fiq_regs[i - 24]; 8978 } 8979 env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)]; 8980 env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)]; 8981 } 8982 8983 env->pc = env->regs[15]; 8984 } 8985 8986 /* 8987 * Function used to synchronize QEMU's AArch32 register set with AArch64 8988 * register set. This is necessary when switching between AArch32 and AArch64 8989 * execution state. 8990 */ 8991 void aarch64_sync_64_to_32(CPUARMState *env) 8992 { 8993 int i; 8994 uint32_t mode = env->uncached_cpsr & CPSR_M; 8995 8996 /* We can blanket copy X[0:7] to R[0:7] */ 8997 for (i = 0; i < 8; i++) { 8998 env->regs[i] = env->xregs[i]; 8999 } 9000 9001 /* 9002 * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12. 9003 * Otherwise, we copy x8-x12 into the banked user regs. 9004 */ 9005 if (mode == ARM_CPU_MODE_FIQ) { 9006 for (i = 8; i < 13; i++) { 9007 env->usr_regs[i - 8] = env->xregs[i]; 9008 } 9009 } else { 9010 for (i = 8; i < 13; i++) { 9011 env->regs[i] = env->xregs[i]; 9012 } 9013 } 9014 9015 /* 9016 * Registers r13 & r14 depend on the current mode. 9017 * If we are in a given mode, we copy the corresponding x registers to r13 9018 * and r14. Otherwise, we copy the x register to the banked r13 and r14 9019 * for the mode. 
9020 */ 9021 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 9022 env->regs[13] = env->xregs[13]; 9023 env->regs[14] = env->xregs[14]; 9024 } else { 9025 env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13]; 9026 9027 /* 9028 * HYP is an exception in that it does not have its own banked r14 but 9029 * shares the USR r14 9030 */ 9031 if (mode == ARM_CPU_MODE_HYP) { 9032 env->regs[14] = env->xregs[14]; 9033 } else { 9034 env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14]; 9035 } 9036 } 9037 9038 if (mode == ARM_CPU_MODE_HYP) { 9039 env->regs[13] = env->xregs[15]; 9040 } else { 9041 env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15]; 9042 } 9043 9044 if (mode == ARM_CPU_MODE_IRQ) { 9045 env->regs[14] = env->xregs[16]; 9046 env->regs[13] = env->xregs[17]; 9047 } else { 9048 env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16]; 9049 env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17]; 9050 } 9051 9052 if (mode == ARM_CPU_MODE_SVC) { 9053 env->regs[14] = env->xregs[18]; 9054 env->regs[13] = env->xregs[19]; 9055 } else { 9056 env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18]; 9057 env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19]; 9058 } 9059 9060 if (mode == ARM_CPU_MODE_ABT) { 9061 env->regs[14] = env->xregs[20]; 9062 env->regs[13] = env->xregs[21]; 9063 } else { 9064 env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20]; 9065 env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21]; 9066 } 9067 9068 if (mode == ARM_CPU_MODE_UND) { 9069 env->regs[14] = env->xregs[22]; 9070 env->regs[13] = env->xregs[23]; 9071 } else { 9072 env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22]; 9073 env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23]; 9074 } 9075 9076 /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 9077 * mode, then we can copy to r8-r14. Otherwise, we copy to the 9078 * FIQ bank for r8-r14. 9079 */ 9080 if (mode == ARM_CPU_MODE_FIQ) { 9081 for (i = 24; i < 31; i++) { 9082 env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */ 9083 } 9084 } else { 9085 for (i = 24; i < 29; i++) { 9086 env->fiq_regs[i - 24] = env->xregs[i]; 9087 } 9088 env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29]; 9089 env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30]; 9090 } 9091 9092 env->regs[15] = env->pc; 9093 } 9094 9095 static void take_aarch32_exception(CPUARMState *env, int new_mode, 9096 uint32_t mask, uint32_t offset, 9097 uint32_t newpc) 9098 { 9099 int new_el; 9100 9101 /* Change the CPU state so as to actually take the exception. */ 9102 switch_mode(env, new_mode); 9103 9104 /* 9105 * For exceptions taken to AArch32 we must clear the SS bit in both 9106 * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now. 9107 */ 9108 env->uncached_cpsr &= ~PSTATE_SS; 9109 env->spsr = cpsr_read(env); 9110 /* Clear IT bits. */ 9111 env->condexec_bits = 0; 9112 /* Switch to the new mode, and to the correct instruction set. */ 9113 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode; 9114 9115 /* This must be after mode switching. 
*/ 9116 new_el = arm_current_el(env); 9117 9118 /* Set new mode endianness */ 9119 env->uncached_cpsr &= ~CPSR_E; 9120 if (env->cp15.sctlr_el[new_el] & SCTLR_EE) { 9121 env->uncached_cpsr |= CPSR_E; 9122 } 9123 /* J and IL must always be cleared for exception entry */ 9124 env->uncached_cpsr &= ~(CPSR_IL | CPSR_J); 9125 env->daif |= mask; 9126 9127 if (new_mode == ARM_CPU_MODE_HYP) { 9128 env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0; 9129 env->elr_el[2] = env->regs[15]; 9130 } else { 9131 /* CPSR.PAN is normally preserved unless... */ 9132 if (cpu_isar_feature(aa32_pan, env_archcpu(env))) { 9133 switch (new_el) { 9134 case 3: 9135 if (!arm_is_secure_below_el3(env)) { 9136 /* ... the target is EL3, from non-secure state. */ 9137 env->uncached_cpsr &= ~CPSR_PAN; 9138 break; 9139 } 9140 /* ... the target is EL3, from secure state ... */ 9141 /* fall through */ 9142 case 1: 9143 /* ... the target is EL1 and SCTLR.SPAN is 0. */ 9144 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) { 9145 env->uncached_cpsr |= CPSR_PAN; 9146 } 9147 break; 9148 } 9149 } 9150 /* 9151 * this is a lie, as there was no c1_sys on V4T/V5, but who cares 9152 * and we should just guard the thumb mode on V4 9153 */ 9154 if (arm_feature(env, ARM_FEATURE_V4T)) { 9155 env->thumb = 9156 (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0; 9157 } 9158 env->regs[14] = env->regs[15] + offset; 9159 } 9160 env->regs[15] = newpc; 9161 arm_rebuild_hflags(env); 9162 } 9163 9164 static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs) 9165 { 9166 /* 9167 * Handle exception entry to Hyp mode; this is sufficiently 9168 * different to entry to other AArch32 modes that we handle it 9169 * separately here. 9170 * 9171 * The vector table entry used is always the 0x14 Hyp mode entry point, 9172 * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp. 9173 * The offset applied to the preferred return address is always zero 9174 * (see DDI0487C.a section G1.12.3). 9175 * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values. 9176 */ 9177 uint32_t addr, mask; 9178 ARMCPU *cpu = ARM_CPU(cs); 9179 CPUARMState *env = &cpu->env; 9180 9181 switch (cs->exception_index) { 9182 case EXCP_UDEF: 9183 addr = 0x04; 9184 break; 9185 case EXCP_SWI: 9186 addr = 0x14; 9187 break; 9188 case EXCP_BKPT: 9189 /* Fall through to prefetch abort. */ 9190 case EXCP_PREFETCH_ABORT: 9191 env->cp15.ifar_s = env->exception.vaddress; 9192 qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n", 9193 (uint32_t)env->exception.vaddress); 9194 addr = 0x0c; 9195 break; 9196 case EXCP_DATA_ABORT: 9197 env->cp15.dfar_s = env->exception.vaddress; 9198 qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n", 9199 (uint32_t)env->exception.vaddress); 9200 addr = 0x10; 9201 break; 9202 case EXCP_IRQ: 9203 addr = 0x18; 9204 break; 9205 case EXCP_FIQ: 9206 addr = 0x1c; 9207 break; 9208 case EXCP_HVC: 9209 addr = 0x08; 9210 break; 9211 case EXCP_HYP_TRAP: 9212 addr = 0x14; 9213 break; 9214 default: 9215 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 9216 } 9217 9218 if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) { 9219 if (!arm_feature(env, ARM_FEATURE_V8)) { 9220 /* 9221 * QEMU syndrome values are v8-style. v7 has the IL bit 9222 * UNK/SBZP for "field not valid" cases, where v8 uses RES1. 9223 * If this is a v7 CPU, squash the IL bit in those cases.
9224 */ 9225 if (cs->exception_index == EXCP_PREFETCH_ABORT || 9226 (cs->exception_index == EXCP_DATA_ABORT && 9227 !(env->exception.syndrome & ARM_EL_ISV)) || 9228 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) { 9229 env->exception.syndrome &= ~ARM_EL_IL; 9230 } 9231 } 9232 env->cp15.esr_el[2] = env->exception.syndrome; 9233 } 9234 9235 if (arm_current_el(env) != 2 && addr < 0x14) { 9236 addr = 0x14; 9237 } 9238 9239 mask = 0; 9240 if (!(env->cp15.scr_el3 & SCR_EA)) { 9241 mask |= CPSR_A; 9242 } 9243 if (!(env->cp15.scr_el3 & SCR_IRQ)) { 9244 mask |= CPSR_I; 9245 } 9246 if (!(env->cp15.scr_el3 & SCR_FIQ)) { 9247 mask |= CPSR_F; 9248 } 9249 9250 addr += env->cp15.hvbar; 9251 9252 take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr); 9253 } 9254 9255 static void arm_cpu_do_interrupt_aarch32(CPUState *cs) 9256 { 9257 ARMCPU *cpu = ARM_CPU(cs); 9258 CPUARMState *env = &cpu->env; 9259 uint32_t addr; 9260 uint32_t mask; 9261 int new_mode; 9262 uint32_t offset; 9263 uint32_t moe; 9264 9265 /* If this is a debug exception we must update the DBGDSCR.MOE bits */ 9266 switch (syn_get_ec(env->exception.syndrome)) { 9267 case EC_BREAKPOINT: 9268 case EC_BREAKPOINT_SAME_EL: 9269 moe = 1; 9270 break; 9271 case EC_WATCHPOINT: 9272 case EC_WATCHPOINT_SAME_EL: 9273 moe = 10; 9274 break; 9275 case EC_AA32_BKPT: 9276 moe = 3; 9277 break; 9278 case EC_VECTORCATCH: 9279 moe = 5; 9280 break; 9281 default: 9282 moe = 0; 9283 break; 9284 } 9285 9286 if (moe) { 9287 env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe); 9288 } 9289 9290 if (env->exception.target_el == 2) { 9291 arm_cpu_do_interrupt_aarch32_hyp(cs); 9292 return; 9293 } 9294 9295 switch (cs->exception_index) { 9296 case EXCP_UDEF: 9297 new_mode = ARM_CPU_MODE_UND; 9298 addr = 0x04; 9299 mask = CPSR_I; 9300 if (env->thumb) 9301 offset = 2; 9302 else 9303 offset = 4; 9304 break; 9305 case EXCP_SWI: 9306 new_mode = ARM_CPU_MODE_SVC; 9307 addr = 0x08; 9308 mask = CPSR_I; 9309 /* The PC already points to the next instruction. */ 9310 offset = 0; 9311 break; 9312 case EXCP_BKPT: 9313 /* Fall through to prefetch abort. */ 9314 case EXCP_PREFETCH_ABORT: 9315 A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr); 9316 A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress); 9317 qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n", 9318 env->exception.fsr, (uint32_t)env->exception.vaddress); 9319 new_mode = ARM_CPU_MODE_ABT; 9320 addr = 0x0c; 9321 mask = CPSR_A | CPSR_I; 9322 offset = 4; 9323 break; 9324 case EXCP_DATA_ABORT: 9325 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr); 9326 A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress); 9327 qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n", 9328 env->exception.fsr, 9329 (uint32_t)env->exception.vaddress); 9330 new_mode = ARM_CPU_MODE_ABT; 9331 addr = 0x10; 9332 mask = CPSR_A | CPSR_I; 9333 offset = 8; 9334 break; 9335 case EXCP_IRQ: 9336 new_mode = ARM_CPU_MODE_IRQ; 9337 addr = 0x18; 9338 /* Disable IRQ and imprecise data aborts. */ 9339 mask = CPSR_A | CPSR_I; 9340 offset = 4; 9341 if (env->cp15.scr_el3 & SCR_IRQ) { 9342 /* IRQ routed to monitor mode */ 9343 new_mode = ARM_CPU_MODE_MON; 9344 mask |= CPSR_F; 9345 } 9346 break; 9347 case EXCP_FIQ: 9348 new_mode = ARM_CPU_MODE_FIQ; 9349 addr = 0x1c; 9350 /* Disable FIQ, IRQ and imprecise data aborts. 
*/ 9351 mask = CPSR_A | CPSR_I | CPSR_F; 9352 if (env->cp15.scr_el3 & SCR_FIQ) { 9353 /* FIQ routed to monitor mode */ 9354 new_mode = ARM_CPU_MODE_MON; 9355 } 9356 offset = 4; 9357 break; 9358 case EXCP_VIRQ: 9359 new_mode = ARM_CPU_MODE_IRQ; 9360 addr = 0x18; 9361 /* Disable IRQ and imprecise data aborts. */ 9362 mask = CPSR_A | CPSR_I; 9363 offset = 4; 9364 break; 9365 case EXCP_VFIQ: 9366 new_mode = ARM_CPU_MODE_FIQ; 9367 addr = 0x1c; 9368 /* Disable FIQ, IRQ and imprecise data aborts. */ 9369 mask = CPSR_A | CPSR_I | CPSR_F; 9370 offset = 4; 9371 break; 9372 case EXCP_SMC: 9373 new_mode = ARM_CPU_MODE_MON; 9374 addr = 0x08; 9375 mask = CPSR_A | CPSR_I | CPSR_F; 9376 offset = 0; 9377 break; 9378 default: 9379 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 9380 return; /* Never happens. Keep compiler happy. */ 9381 } 9382 9383 if (new_mode == ARM_CPU_MODE_MON) { 9384 addr += env->cp15.mvbar; 9385 } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) { 9386 /* High vectors. When enabled, base address cannot be remapped. */ 9387 addr += 0xffff0000; 9388 } else { 9389 /* ARM v7 architectures provide a vector base address register to remap 9390 * the interrupt vector table. 9391 * This register is only followed in non-monitor mode, and is banked. 9392 * Note: only bits 31:5 are valid. 9393 */ 9394 addr += A32_BANKED_CURRENT_REG_GET(env, vbar); 9395 } 9396 9397 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { 9398 env->cp15.scr_el3 &= ~SCR_NS; 9399 } 9400 9401 take_aarch32_exception(env, new_mode, mask, offset, addr); 9402 } 9403 9404 /* Handle exception entry to a target EL which is using AArch64 */ 9405 static void arm_cpu_do_interrupt_aarch64(CPUState *cs) 9406 { 9407 ARMCPU *cpu = ARM_CPU(cs); 9408 CPUARMState *env = &cpu->env; 9409 unsigned int new_el = env->exception.target_el; 9410 target_ulong addr = env->cp15.vbar_el[new_el]; 9411 unsigned int new_mode = aarch64_pstate_mode(new_el, true); 9412 unsigned int old_mode; 9413 unsigned int cur_el = arm_current_el(env); 9414 9415 /* 9416 * Note that new_el can never be 0. If cur_el is 0, then 9417 * el0_a64 is is_a64(), else el0_a64 is ignored. 
9418 */ 9419 aarch64_sve_change_el(env, cur_el, new_el, is_a64(env)); 9420 9421 if (cur_el < new_el) { 9422 /* Entry vector offset depends on whether the implemented EL 9423 * immediately lower than the target level is using AArch32 or AArch64 9424 */ 9425 bool is_aa64; 9426 uint64_t hcr; 9427 9428 switch (new_el) { 9429 case 3: 9430 is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0; 9431 break; 9432 case 2: 9433 hcr = arm_hcr_el2_eff(env); 9434 if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { 9435 is_aa64 = (hcr & HCR_RW) != 0; 9436 break; 9437 } 9438 /* fall through */ 9439 case 1: 9440 is_aa64 = is_a64(env); 9441 break; 9442 default: 9443 g_assert_not_reached(); 9444 } 9445 9446 if (is_aa64) { 9447 addr += 0x400; 9448 } else { 9449 addr += 0x600; 9450 } 9451 } else if (pstate_read(env) & PSTATE_SP) { 9452 addr += 0x200; 9453 } 9454 9455 switch (cs->exception_index) { 9456 case EXCP_PREFETCH_ABORT: 9457 case EXCP_DATA_ABORT: 9458 env->cp15.far_el[new_el] = env->exception.vaddress; 9459 qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n", 9460 env->cp15.far_el[new_el]); 9461 /* fall through */ 9462 case EXCP_BKPT: 9463 case EXCP_UDEF: 9464 case EXCP_SWI: 9465 case EXCP_HVC: 9466 case EXCP_HYP_TRAP: 9467 case EXCP_SMC: 9468 if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) { 9469 /* 9470 * QEMU internal FP/SIMD syndromes from AArch32 include the 9471 * TA and coproc fields which are only exposed if the exception 9472 * is taken to AArch32 Hyp mode. Mask them out to get a valid 9473 * AArch64 format syndrome. 9474 */ 9475 env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20); 9476 } 9477 env->cp15.esr_el[new_el] = env->exception.syndrome; 9478 break; 9479 case EXCP_IRQ: 9480 case EXCP_VIRQ: 9481 addr += 0x80; 9482 break; 9483 case EXCP_FIQ: 9484 case EXCP_VFIQ: 9485 addr += 0x100; 9486 break; 9487 default: 9488 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 9489 } 9490 9491 if (is_a64(env)) { 9492 old_mode = pstate_read(env); 9493 aarch64_save_sp(env, arm_current_el(env)); 9494 env->elr_el[new_el] = env->pc; 9495 } else { 9496 old_mode = cpsr_read(env); 9497 env->elr_el[new_el] = env->regs[15]; 9498 9499 aarch64_sync_32_to_64(env); 9500 9501 env->condexec_bits = 0; 9502 } 9503 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode; 9504 9505 qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n", 9506 env->elr_el[new_el]); 9507 9508 if (cpu_isar_feature(aa64_pan, cpu)) { 9509 /* The value of PSTATE.PAN is normally preserved, except when ... */ 9510 new_mode |= old_mode & PSTATE_PAN; 9511 switch (new_el) { 9512 case 2: 9513 /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ... */ 9514 if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) 9515 != (HCR_E2H | HCR_TGE)) { 9516 break; 9517 } 9518 /* fall through */ 9519 case 1: 9520 /* ... the target is EL1 ... */ 9521 /* ... and SCTLR_ELx.SPAN == 0, then set to 1. */ 9522 if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) { 9523 new_mode |= PSTATE_PAN; 9524 } 9525 break; 9526 } 9527 } 9528 9529 pstate_write(env, PSTATE_DAIF | new_mode); 9530 env->aarch64 = 1; 9531 aarch64_restore_sp(env, new_el); 9532 helper_rebuild_hflags_a64(env, new_el); 9533 9534 env->pc = addr; 9535 9536 qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n", 9537 new_el, env->pc, pstate_read(env)); 9538 } 9539 9540 /* 9541 * Do semihosting call and set the appropriate return value. All the 9542 * permission and validity checks have been done at translate time. 
9543 * 9544 * We only see semihosting exceptions in TCG only as they are not 9545 * trapped to the hypervisor in KVM. 9546 */ 9547 #ifdef CONFIG_TCG 9548 static void handle_semihosting(CPUState *cs) 9549 { 9550 ARMCPU *cpu = ARM_CPU(cs); 9551 CPUARMState *env = &cpu->env; 9552 9553 if (is_a64(env)) { 9554 qemu_log_mask(CPU_LOG_INT, 9555 "...handling as semihosting call 0x%" PRIx64 "\n", 9556 env->xregs[0]); 9557 env->xregs[0] = do_arm_semihosting(env); 9558 env->pc += 4; 9559 } else { 9560 qemu_log_mask(CPU_LOG_INT, 9561 "...handling as semihosting call 0x%x\n", 9562 env->regs[0]); 9563 env->regs[0] = do_arm_semihosting(env); 9564 env->regs[15] += env->thumb ? 2 : 4; 9565 } 9566 } 9567 #endif 9568 9569 /* Handle a CPU exception for A and R profile CPUs. 9570 * Do any appropriate logging, handle PSCI calls, and then hand off 9571 * to the AArch64-entry or AArch32-entry function depending on the 9572 * target exception level's register width. 9573 */ 9574 void arm_cpu_do_interrupt(CPUState *cs) 9575 { 9576 ARMCPU *cpu = ARM_CPU(cs); 9577 CPUARMState *env = &cpu->env; 9578 unsigned int new_el = env->exception.target_el; 9579 9580 assert(!arm_feature(env, ARM_FEATURE_M)); 9581 9582 arm_log_exception(cs->exception_index); 9583 qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env), 9584 new_el); 9585 if (qemu_loglevel_mask(CPU_LOG_INT) 9586 && !excp_is_internal(cs->exception_index)) { 9587 qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n", 9588 syn_get_ec(env->exception.syndrome), 9589 env->exception.syndrome); 9590 } 9591 9592 if (arm_is_psci_call(cpu, cs->exception_index)) { 9593 arm_handle_psci_call(cpu); 9594 qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n"); 9595 return; 9596 } 9597 9598 /* 9599 * Semihosting semantics depend on the register width of the code 9600 * that caused the exception, not the target exception level, so 9601 * must be handled here. 9602 */ 9603 #ifdef CONFIG_TCG 9604 if (cs->exception_index == EXCP_SEMIHOST) { 9605 handle_semihosting(cs); 9606 return; 9607 } 9608 #endif 9609 9610 /* Hooks may change global state so BQL should be held, also the 9611 * BQL needs to be held for any modification of 9612 * cs->interrupt_request. 9613 */ 9614 g_assert(qemu_mutex_iothread_locked()); 9615 9616 arm_call_pre_el_change_hook(cpu); 9617 9618 assert(!excp_is_internal(cs->exception_index)); 9619 if (arm_el_is_aa64(env, new_el)) { 9620 arm_cpu_do_interrupt_aarch64(cs); 9621 } else { 9622 arm_cpu_do_interrupt_aarch32(cs); 9623 } 9624 9625 arm_call_el_change_hook(cpu); 9626 9627 if (!kvm_enabled()) { 9628 cs->interrupt_request |= CPU_INTERRUPT_EXITTB; 9629 } 9630 } 9631 #endif /* !CONFIG_USER_ONLY */ 9632 9633 /* Return the exception level which controls this address translation regime */ 9634 static uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx) 9635 { 9636 switch (mmu_idx) { 9637 case ARMMMUIdx_E20_0: 9638 case ARMMMUIdx_E20_2: 9639 case ARMMMUIdx_E20_2_PAN: 9640 case ARMMMUIdx_Stage2: 9641 case ARMMMUIdx_E2: 9642 return 2; 9643 case ARMMMUIdx_SE3: 9644 return 3; 9645 case ARMMMUIdx_SE10_0: 9646 return arm_el_is_aa64(env, 3) ? 
1 : 3; 9647 case ARMMMUIdx_SE10_1: 9648 case ARMMMUIdx_SE10_1_PAN: 9649 case ARMMMUIdx_Stage1_E0: 9650 case ARMMMUIdx_Stage1_E1: 9651 case ARMMMUIdx_Stage1_E1_PAN: 9652 case ARMMMUIdx_E10_0: 9653 case ARMMMUIdx_E10_1: 9654 case ARMMMUIdx_E10_1_PAN: 9655 case ARMMMUIdx_MPrivNegPri: 9656 case ARMMMUIdx_MUserNegPri: 9657 case ARMMMUIdx_MPriv: 9658 case ARMMMUIdx_MUser: 9659 case ARMMMUIdx_MSPrivNegPri: 9660 case ARMMMUIdx_MSUserNegPri: 9661 case ARMMMUIdx_MSPriv: 9662 case ARMMMUIdx_MSUser: 9663 return 1; 9664 default: 9665 g_assert_not_reached(); 9666 } 9667 } 9668 9669 uint64_t arm_sctlr(CPUARMState *env, int el) 9670 { 9671 /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */ 9672 if (el == 0) { 9673 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0); 9674 el = (mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1); 9675 } 9676 return env->cp15.sctlr_el[el]; 9677 } 9678 9679 /* Return the SCTLR value which controls this address translation regime */ 9680 static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx) 9681 { 9682 return env->cp15.sctlr_el[regime_el(env, mmu_idx)]; 9683 } 9684 9685 #ifndef CONFIG_USER_ONLY 9686 9687 /* Return true if the specified stage of address translation is disabled */ 9688 static inline bool regime_translation_disabled(CPUARMState *env, 9689 ARMMMUIdx mmu_idx) 9690 { 9691 if (arm_feature(env, ARM_FEATURE_M)) { 9692 switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] & 9693 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) { 9694 case R_V7M_MPU_CTRL_ENABLE_MASK: 9695 /* Enabled, but not for HardFault and NMI */ 9696 return mmu_idx & ARM_MMU_IDX_M_NEGPRI; 9697 case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK: 9698 /* Enabled for all cases */ 9699 return false; 9700 case 0: 9701 default: 9702 /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but 9703 * we warned about that in armv7m_nvic.c when the guest set it. 
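 * We treat that case as if the MPU were simply disabled.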
9704 */ 9705 return true; 9706 } 9707 } 9708 9709 if (mmu_idx == ARMMMUIdx_Stage2) { 9710 /* HCR.DC means HCR.VM behaves as 1 */ 9711 return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0; 9712 } 9713 9714 if (env->cp15.hcr_el2 & HCR_TGE) { 9715 /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */ 9716 if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) { 9717 return true; 9718 } 9719 } 9720 9721 if ((env->cp15.hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) { 9722 /* HCR.DC means SCTLR_EL1.M behaves as 0 */ 9723 return true; 9724 } 9725 9726 return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0; 9727 } 9728 9729 static inline bool regime_translation_big_endian(CPUARMState *env, 9730 ARMMMUIdx mmu_idx) 9731 { 9732 return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0; 9733 } 9734 9735 /* Return the TTBR associated with this translation regime */ 9736 static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, 9737 int ttbrn) 9738 { 9739 if (mmu_idx == ARMMMUIdx_Stage2) { 9740 return env->cp15.vttbr_el2; 9741 } 9742 if (ttbrn == 0) { 9743 return env->cp15.ttbr0_el[regime_el(env, mmu_idx)]; 9744 } else { 9745 return env->cp15.ttbr1_el[regime_el(env, mmu_idx)]; 9746 } 9747 } 9748 9749 #endif /* !CONFIG_USER_ONLY */ 9750 9751 /* Return the TCR controlling this translation regime */ 9752 static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx) 9753 { 9754 if (mmu_idx == ARMMMUIdx_Stage2) { 9755 return &env->cp15.vtcr_el2; 9756 } 9757 return &env->cp15.tcr_el[regime_el(env, mmu_idx)]; 9758 } 9759 9760 /* Convert a possible stage1+2 MMU index into the appropriate 9761 * stage 1 MMU index 9762 */ 9763 static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx) 9764 { 9765 switch (mmu_idx) { 9766 case ARMMMUIdx_E10_0: 9767 return ARMMMUIdx_Stage1_E0; 9768 case ARMMMUIdx_E10_1: 9769 return ARMMMUIdx_Stage1_E1; 9770 case ARMMMUIdx_E10_1_PAN: 9771 return ARMMMUIdx_Stage1_E1_PAN; 9772 default: 9773 return mmu_idx; 9774 } 9775 } 9776 9777 /* Return true if the translation regime is using LPAE format page tables */ 9778 static inline bool regime_using_lpae_format(CPUARMState *env, 9779 ARMMMUIdx mmu_idx) 9780 { 9781 int el = regime_el(env, mmu_idx); 9782 if (el == 2 || arm_el_is_aa64(env, el)) { 9783 return true; 9784 } 9785 if (arm_feature(env, ARM_FEATURE_LPAE) 9786 && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) { 9787 return true; 9788 } 9789 return false; 9790 } 9791 9792 /* Returns true if the stage 1 translation regime is using LPAE format page 9793 * tables. Used when raising alignment exceptions, whose FSR changes depending 9794 * on whether the long or short descriptor format is in use. 
*/ 9795 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx) 9796 { 9797 mmu_idx = stage_1_mmu_idx(mmu_idx); 9798 9799 return regime_using_lpae_format(env, mmu_idx); 9800 } 9801 9802 #ifndef CONFIG_USER_ONLY 9803 static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) 9804 { 9805 switch (mmu_idx) { 9806 case ARMMMUIdx_SE10_0: 9807 case ARMMMUIdx_E20_0: 9808 case ARMMMUIdx_Stage1_E0: 9809 case ARMMMUIdx_MUser: 9810 case ARMMMUIdx_MSUser: 9811 case ARMMMUIdx_MUserNegPri: 9812 case ARMMMUIdx_MSUserNegPri: 9813 return true; 9814 default: 9815 return false; 9816 case ARMMMUIdx_E10_0: 9817 case ARMMMUIdx_E10_1: 9818 case ARMMMUIdx_E10_1_PAN: 9819 g_assert_not_reached(); 9820 } 9821 } 9822 9823 /* Translate section/page access permissions to page 9824 * R/W protection flags 9825 * 9826 * @env: CPUARMState 9827 * @mmu_idx: MMU index indicating required translation regime 9828 * @ap: The 3-bit access permissions (AP[2:0]) 9829 * @domain_prot: The 2-bit domain access permissions 9830 */ 9831 static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, 9832 int ap, int domain_prot) 9833 { 9834 bool is_user = regime_is_user(env, mmu_idx); 9835 9836 if (domain_prot == 3) { 9837 return PAGE_READ | PAGE_WRITE; 9838 } 9839 9840 switch (ap) { 9841 case 0: 9842 if (arm_feature(env, ARM_FEATURE_V7)) { 9843 return 0; 9844 } 9845 switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) { 9846 case SCTLR_S: 9847 return is_user ? 0 : PAGE_READ; 9848 case SCTLR_R: 9849 return PAGE_READ; 9850 default: 9851 return 0; 9852 } 9853 case 1: 9854 return is_user ? 0 : PAGE_READ | PAGE_WRITE; 9855 case 2: 9856 if (is_user) { 9857 return PAGE_READ; 9858 } else { 9859 return PAGE_READ | PAGE_WRITE; 9860 } 9861 case 3: 9862 return PAGE_READ | PAGE_WRITE; 9863 case 4: /* Reserved. */ 9864 return 0; 9865 case 5: 9866 return is_user ? 0 : PAGE_READ; 9867 case 6: 9868 return PAGE_READ; 9869 case 7: 9870 if (!arm_feature(env, ARM_FEATURE_V6K)) { 9871 return 0; 9872 } 9873 return PAGE_READ; 9874 default: 9875 g_assert_not_reached(); 9876 } 9877 } 9878 9879 /* Translate section/page access permissions to page 9880 * R/W protection flags. 9881 * 9882 * @ap: The 2-bit simple AP (AP[2:1]) 9883 * @is_user: TRUE if accessing from PL0 9884 */ 9885 static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user) 9886 { 9887 switch (ap) { 9888 case 0: 9889 return is_user ? 0 : PAGE_READ | PAGE_WRITE; 9890 case 1: 9891 return PAGE_READ | PAGE_WRITE; 9892 case 2: 9893 return is_user ? 
0 : PAGE_READ; 9894 case 3: 9895 return PAGE_READ; 9896 default: 9897 g_assert_not_reached(); 9898 } 9899 } 9900 9901 static inline int 9902 simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap) 9903 { 9904 return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx)); 9905 } 9906 9907 /* Translate S2 section/page access permissions to protection flags 9908 * 9909 * @env: CPUARMState 9910 * @s2ap: The 2-bit stage2 access permissions (S2AP) 9911 * @xn: XN (execute-never) bits 9912 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0 9913 */ 9914 static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0) 9915 { 9916 int prot = 0; 9917 9918 if (s2ap & 1) { 9919 prot |= PAGE_READ; 9920 } 9921 if (s2ap & 2) { 9922 prot |= PAGE_WRITE; 9923 } 9924 9925 if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) { 9926 switch (xn) { 9927 case 0: 9928 prot |= PAGE_EXEC; 9929 break; 9930 case 1: 9931 if (s1_is_el0) { 9932 prot |= PAGE_EXEC; 9933 } 9934 break; 9935 case 2: 9936 break; 9937 case 3: 9938 if (!s1_is_el0) { 9939 prot |= PAGE_EXEC; 9940 } 9941 break; 9942 default: 9943 g_assert_not_reached(); 9944 } 9945 } else { 9946 if (!extract32(xn, 1, 1)) { 9947 if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) { 9948 prot |= PAGE_EXEC; 9949 } 9950 } 9951 } 9952 return prot; 9953 } 9954 9955 /* Translate section/page access permissions to protection flags 9956 * 9957 * @env: CPUARMState 9958 * @mmu_idx: MMU index indicating required translation regime 9959 * @is_aa64: TRUE if AArch64 9960 * @ap: The 2-bit simple AP (AP[2:1]) 9961 * @ns: NS (non-secure) bit 9962 * @xn: XN (execute-never) bit 9963 * @pxn: PXN (privileged execute-never) bit 9964 */ 9965 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, 9966 int ap, int ns, int xn, int pxn) 9967 { 9968 bool is_user = regime_is_user(env, mmu_idx); 9969 int prot_rw, user_rw; 9970 bool have_wxn; 9971 int wxn = 0; 9972 9973 assert(mmu_idx != ARMMMUIdx_Stage2); 9974 9975 user_rw = simple_ap_to_rw_prot_is_user(ap, true); 9976 if (is_user) { 9977 prot_rw = user_rw; 9978 } else { 9979 if (user_rw && regime_is_pan(env, mmu_idx)) { 9980 /* PAN forbids data accesses but doesn't affect insn fetch */ 9981 prot_rw = 0; 9982 } else { 9983 prot_rw = simple_ap_to_rw_prot_is_user(ap, false); 9984 } 9985 } 9986 9987 if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) { 9988 return prot_rw; 9989 } 9990 9991 /* TODO have_wxn should be replaced with 9992 * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2) 9993 * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE 9994 * compatible processors have EL2, which is required for [U]WXN. 
9995 */ 9996 have_wxn = arm_feature(env, ARM_FEATURE_LPAE); 9997 9998 if (have_wxn) { 9999 wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN; 10000 } 10001 10002 if (is_aa64) { 10003 if (regime_has_2_ranges(mmu_idx) && !is_user) { 10004 xn = pxn || (user_rw & PAGE_WRITE); 10005 } 10006 } else if (arm_feature(env, ARM_FEATURE_V7)) { 10007 switch (regime_el(env, mmu_idx)) { 10008 case 1: 10009 case 3: 10010 if (is_user) { 10011 xn = xn || !(user_rw & PAGE_READ); 10012 } else { 10013 int uwxn = 0; 10014 if (have_wxn) { 10015 uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN; 10016 } 10017 xn = xn || !(prot_rw & PAGE_READ) || pxn || 10018 (uwxn && (user_rw & PAGE_WRITE)); 10019 } 10020 break; 10021 case 2: 10022 break; 10023 } 10024 } else { 10025 xn = wxn = 0; 10026 } 10027 10028 if (xn || (wxn && (prot_rw & PAGE_WRITE))) { 10029 return prot_rw; 10030 } 10031 return prot_rw | PAGE_EXEC; 10032 } 10033 10034 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, 10035 uint32_t *table, uint32_t address) 10036 { 10037 /* Note that we can only get here for an AArch32 PL0/PL1 lookup */ 10038 TCR *tcr = regime_tcr(env, mmu_idx); 10039 10040 if (address & tcr->mask) { 10041 if (tcr->raw_tcr & TTBCR_PD1) { 10042 /* Translation table walk disabled for TTBR1 */ 10043 return false; 10044 } 10045 *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000; 10046 } else { 10047 if (tcr->raw_tcr & TTBCR_PD0) { 10048 /* Translation table walk disabled for TTBR0 */ 10049 return false; 10050 } 10051 *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask; 10052 } 10053 *table |= (address >> 18) & 0x3ffc; 10054 return true; 10055 } 10056 10057 /* Translate a S1 pagetable walk through S2 if needed. */ 10058 static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx, 10059 hwaddr addr, MemTxAttrs txattrs, 10060 ARMMMUFaultInfo *fi) 10061 { 10062 if (arm_mmu_idx_is_stage1_of_2(mmu_idx) && 10063 !regime_translation_disabled(env, ARMMMUIdx_Stage2)) { 10064 target_ulong s2size; 10065 hwaddr s2pa; 10066 int s2prot; 10067 int ret; 10068 ARMCacheAttrs cacheattrs = {}; 10069 ARMCacheAttrs *pcacheattrs = NULL; 10070 10071 if (env->cp15.hcr_el2 & HCR_PTW) { 10072 /* 10073 * PTW means we must fault if this S1 walk touches S2 Device 10074 * memory; otherwise we don't care about the attributes and can 10075 * save the S2 translation the effort of computing them. 10076 */ 10077 pcacheattrs = &cacheattrs; 10078 } 10079 10080 ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, ARMMMUIdx_Stage2, 10081 false, 10082 &s2pa, &txattrs, &s2prot, &s2size, fi, 10083 pcacheattrs); 10084 if (ret) { 10085 assert(fi->type != ARMFault_None); 10086 fi->s2addr = addr; 10087 fi->stage2 = true; 10088 fi->s1ptw = true; 10089 return ~0; 10090 } 10091 if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) { 10092 /* Access was to Device memory: generate Permission fault */ 10093 fi->type = ARMFault_Permission; 10094 fi->s2addr = addr; 10095 fi->stage2 = true; 10096 fi->s1ptw = true; 10097 return ~0; 10098 } 10099 addr = s2pa; 10100 } 10101 return addr; 10102 } 10103 10104 /* All loads done in the course of a page table walk go through here. 
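 * They are translated through stage 2 first when required (S1_ptw_translate),
 * use the endianness that SCTLR.EE selects for the regime, and report an
 * external abort on the walk as ARMFault_SyncExternalOnWalk in *fi.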
*/ 10105 static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure, 10106 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) 10107 { 10108 ARMCPU *cpu = ARM_CPU(cs); 10109 CPUARMState *env = &cpu->env; 10110 MemTxAttrs attrs = {}; 10111 MemTxResult result = MEMTX_OK; 10112 AddressSpace *as; 10113 uint32_t data; 10114 10115 attrs.secure = is_secure; 10116 as = arm_addressspace(cs, attrs); 10117 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); 10118 if (fi->s1ptw) { 10119 return 0; 10120 } 10121 if (regime_translation_big_endian(env, mmu_idx)) { 10122 data = address_space_ldl_be(as, addr, attrs, &result); 10123 } else { 10124 data = address_space_ldl_le(as, addr, attrs, &result); 10125 } 10126 if (result == MEMTX_OK) { 10127 return data; 10128 } 10129 fi->type = ARMFault_SyncExternalOnWalk; 10130 fi->ea = arm_extabort_type(result); 10131 return 0; 10132 } 10133 10134 static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure, 10135 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) 10136 { 10137 ARMCPU *cpu = ARM_CPU(cs); 10138 CPUARMState *env = &cpu->env; 10139 MemTxAttrs attrs = {}; 10140 MemTxResult result = MEMTX_OK; 10141 AddressSpace *as; 10142 uint64_t data; 10143 10144 attrs.secure = is_secure; 10145 as = arm_addressspace(cs, attrs); 10146 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); 10147 if (fi->s1ptw) { 10148 return 0; 10149 } 10150 if (regime_translation_big_endian(env, mmu_idx)) { 10151 data = address_space_ldq_be(as, addr, attrs, &result); 10152 } else { 10153 data = address_space_ldq_le(as, addr, attrs, &result); 10154 } 10155 if (result == MEMTX_OK) { 10156 return data; 10157 } 10158 fi->type = ARMFault_SyncExternalOnWalk; 10159 fi->ea = arm_extabort_type(result); 10160 return 0; 10161 } 10162 10163 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address, 10164 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10165 hwaddr *phys_ptr, int *prot, 10166 target_ulong *page_size, 10167 ARMMMUFaultInfo *fi) 10168 { 10169 CPUState *cs = env_cpu(env); 10170 int level = 1; 10171 uint32_t table; 10172 uint32_t desc; 10173 int type; 10174 int ap; 10175 int domain = 0; 10176 int domain_prot; 10177 hwaddr phys_addr; 10178 uint32_t dacr; 10179 10180 /* Pagetable walk. */ 10181 /* Lookup l1 descriptor. */ 10182 if (!get_level1_table_address(env, mmu_idx, &table, address)) { 10183 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 10184 fi->type = ARMFault_Translation; 10185 goto do_fault; 10186 } 10187 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 10188 mmu_idx, fi); 10189 if (fi->type != ARMFault_None) { 10190 goto do_fault; 10191 } 10192 type = (desc & 3); 10193 domain = (desc >> 5) & 0x0f; 10194 if (regime_el(env, mmu_idx) == 1) { 10195 dacr = env->cp15.dacr_ns; 10196 } else { 10197 dacr = env->cp15.dacr_s; 10198 } 10199 domain_prot = (dacr >> (domain * 2)) & 3; 10200 if (type == 0) { 10201 /* Section translation fault. */ 10202 fi->type = ARMFault_Translation; 10203 goto do_fault; 10204 } 10205 if (type != 2) { 10206 level = 2; 10207 } 10208 if (domain_prot == 0 || domain_prot == 2) { 10209 fi->type = ARMFault_Domain; 10210 goto do_fault; 10211 } 10212 if (type == 2) { 10213 /* 1Mb section. */ 10214 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 10215 ap = (desc >> 10) & 3; 10216 *page_size = 1024 * 1024; 10217 } else { 10218 /* Lookup l2 entry. */ 10219 if (type == 1) { 10220 /* Coarse pagetable. */ 10221 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 10222 } else { 10223 /* Fine pagetable. 
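 * This is the ARMv5 legacy format: a 4KB second-level table indexed by
 * VA[19:10], which also supports the 1KB tiny pages handled in the
 * "1k page" case below.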
*/ 10224 table = (desc & 0xfffff000) | ((address >> 8) & 0xffc); 10225 } 10226 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 10227 mmu_idx, fi); 10228 if (fi->type != ARMFault_None) { 10229 goto do_fault; 10230 } 10231 switch (desc & 3) { 10232 case 0: /* Page translation fault. */ 10233 fi->type = ARMFault_Translation; 10234 goto do_fault; 10235 case 1: /* 64k page. */ 10236 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 10237 ap = (desc >> (4 + ((address >> 13) & 6))) & 3; 10238 *page_size = 0x10000; 10239 break; 10240 case 2: /* 4k page. */ 10241 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 10242 ap = (desc >> (4 + ((address >> 9) & 6))) & 3; 10243 *page_size = 0x1000; 10244 break; 10245 case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */ 10246 if (type == 1) { 10247 /* ARMv6/XScale extended small page format */ 10248 if (arm_feature(env, ARM_FEATURE_XSCALE) 10249 || arm_feature(env, ARM_FEATURE_V6)) { 10250 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 10251 *page_size = 0x1000; 10252 } else { 10253 /* UNPREDICTABLE in ARMv5; we choose to take a 10254 * page translation fault. 10255 */ 10256 fi->type = ARMFault_Translation; 10257 goto do_fault; 10258 } 10259 } else { 10260 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff); 10261 *page_size = 0x400; 10262 } 10263 ap = (desc >> 4) & 3; 10264 break; 10265 default: 10266 /* Never happens, but compiler isn't smart enough to tell. */ 10267 abort(); 10268 } 10269 } 10270 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 10271 *prot |= *prot ? PAGE_EXEC : 0; 10272 if (!(*prot & (1 << access_type))) { 10273 /* Access permission fault. */ 10274 fi->type = ARMFault_Permission; 10275 goto do_fault; 10276 } 10277 *phys_ptr = phys_addr; 10278 return false; 10279 do_fault: 10280 fi->domain = domain; 10281 fi->level = level; 10282 return true; 10283 } 10284 10285 static bool get_phys_addr_v6(CPUARMState *env, uint32_t address, 10286 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10287 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 10288 target_ulong *page_size, ARMMMUFaultInfo *fi) 10289 { 10290 CPUState *cs = env_cpu(env); 10291 int level = 1; 10292 uint32_t table; 10293 uint32_t desc; 10294 uint32_t xn; 10295 uint32_t pxn = 0; 10296 int type; 10297 int ap; 10298 int domain = 0; 10299 int domain_prot; 10300 hwaddr phys_addr; 10301 uint32_t dacr; 10302 bool ns; 10303 10304 /* Pagetable walk. */ 10305 /* Lookup l1 descriptor. */ 10306 if (!get_level1_table_address(env, mmu_idx, &table, address)) { 10307 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 10308 fi->type = ARMFault_Translation; 10309 goto do_fault; 10310 } 10311 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 10312 mmu_idx, fi); 10313 if (fi->type != ARMFault_None) { 10314 goto do_fault; 10315 } 10316 type = (desc & 3); 10317 if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) { 10318 /* Section translation fault, or attempt to use the encoding 10319 * which is Reserved on implementations without PXN. 10320 */ 10321 fi->type = ARMFault_Translation; 10322 goto do_fault; 10323 } 10324 if ((type == 1) || !(desc & (1 << 18))) { 10325 /* Page or Section. 
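 * Supersections have no domain field; for page tables and ordinary
 * sections the domain is taken from bits [8:5] of the L1 descriptor.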
*/ 10326 domain = (desc >> 5) & 0x0f; 10327 } 10328 if (regime_el(env, mmu_idx) == 1) { 10329 dacr = env->cp15.dacr_ns; 10330 } else { 10331 dacr = env->cp15.dacr_s; 10332 } 10333 if (type == 1) { 10334 level = 2; 10335 } 10336 domain_prot = (dacr >> (domain * 2)) & 3; 10337 if (domain_prot == 0 || domain_prot == 2) { 10338 /* Section or Page domain fault */ 10339 fi->type = ARMFault_Domain; 10340 goto do_fault; 10341 } 10342 if (type != 1) { 10343 if (desc & (1 << 18)) { 10344 /* Supersection. */ 10345 phys_addr = (desc & 0xff000000) | (address & 0x00ffffff); 10346 phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32; 10347 phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36; 10348 *page_size = 0x1000000; 10349 } else { 10350 /* Section. */ 10351 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 10352 *page_size = 0x100000; 10353 } 10354 ap = ((desc >> 10) & 3) | ((desc >> 13) & 4); 10355 xn = desc & (1 << 4); 10356 pxn = desc & 1; 10357 ns = extract32(desc, 19, 1); 10358 } else { 10359 if (arm_feature(env, ARM_FEATURE_PXN)) { 10360 pxn = (desc >> 2) & 1; 10361 } 10362 ns = extract32(desc, 3, 1); 10363 /* Lookup l2 entry. */ 10364 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 10365 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 10366 mmu_idx, fi); 10367 if (fi->type != ARMFault_None) { 10368 goto do_fault; 10369 } 10370 ap = ((desc >> 4) & 3) | ((desc >> 7) & 4); 10371 switch (desc & 3) { 10372 case 0: /* Page translation fault. */ 10373 fi->type = ARMFault_Translation; 10374 goto do_fault; 10375 case 1: /* 64k page. */ 10376 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 10377 xn = desc & (1 << 15); 10378 *page_size = 0x10000; 10379 break; 10380 case 2: case 3: /* 4k page. */ 10381 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 10382 xn = desc & 1; 10383 *page_size = 0x1000; 10384 break; 10385 default: 10386 /* Never happens, but compiler isn't smart enough to tell. */ 10387 abort(); 10388 } 10389 } 10390 if (domain_prot == 3) { 10391 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 10392 } else { 10393 if (pxn && !regime_is_user(env, mmu_idx)) { 10394 xn = 1; 10395 } 10396 if (xn && access_type == MMU_INST_FETCH) { 10397 fi->type = ARMFault_Permission; 10398 goto do_fault; 10399 } 10400 10401 if (arm_feature(env, ARM_FEATURE_V6K) && 10402 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) { 10403 /* The simplified model uses AP[0] as an access control bit. */ 10404 if ((ap & 1) == 0) { 10405 /* Access flag fault. */ 10406 fi->type = ARMFault_AccessFlag; 10407 goto do_fault; 10408 } 10409 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1); 10410 } else { 10411 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 10412 } 10413 if (*prot && !xn) { 10414 *prot |= PAGE_EXEC; 10415 } 10416 if (!(*prot & (1 << access_type))) { 10417 /* Access permission fault. */ 10418 fi->type = ARMFault_Permission; 10419 goto do_fault; 10420 } 10421 } 10422 if (ns) { 10423 /* The NS bit will (as required by the architecture) have no effect if 10424 * the CPU doesn't support TZ or this is a non-secure translation 10425 * regime, because the attribute will already be non-secure. 
10426 */ 10427 attrs->secure = false; 10428 } 10429 *phys_ptr = phys_addr; 10430 return false; 10431 do_fault: 10432 fi->domain = domain; 10433 fi->level = level; 10434 return true; 10435 } 10436 10437 /* 10438 * check_s2_mmu_setup 10439 * @cpu: ARMCPU 10440 * @is_aa64: True if the translation regime is in AArch64 state 10441 * @startlevel: Suggested starting level 10442 * @inputsize: Bitsize of IPAs 10443 * @stride: Page-table stride (See the ARM ARM) 10444 * 10445 * Returns true if the suggested S2 translation parameters are OK and 10446 * false otherwise. 10447 */ 10448 static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level, 10449 int inputsize, int stride) 10450 { 10451 const int grainsize = stride + 3; 10452 int startsizecheck; 10453 10454 /* Negative levels are never allowed. */ 10455 if (level < 0) { 10456 return false; 10457 } 10458 10459 startsizecheck = inputsize - ((3 - level) * stride + grainsize); 10460 if (startsizecheck < 1 || startsizecheck > stride + 4) { 10461 return false; 10462 } 10463 10464 if (is_aa64) { 10465 CPUARMState *env = &cpu->env; 10466 unsigned int pamax = arm_pamax(cpu); 10467 10468 switch (stride) { 10469 case 13: /* 64KB Pages. */ 10470 if (level == 0 || (level == 1 && pamax <= 42)) { 10471 return false; 10472 } 10473 break; 10474 case 11: /* 16KB Pages. */ 10475 if (level == 0 || (level == 1 && pamax <= 40)) { 10476 return false; 10477 } 10478 break; 10479 case 9: /* 4KB Pages. */ 10480 if (level == 0 && pamax <= 42) { 10481 return false; 10482 } 10483 break; 10484 default: 10485 g_assert_not_reached(); 10486 } 10487 10488 /* Inputsize checks. */ 10489 if (inputsize > pamax && 10490 (arm_el_is_aa64(env, 1) || inputsize > 40)) { 10491 /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */ 10492 return false; 10493 } 10494 } else { 10495 /* AArch32 only supports 4KB pages. Assert on that. */ 10496 assert(stride == 9); 10497 10498 if (level == 0) { 10499 return false; 10500 } 10501 } 10502 return true; 10503 } 10504 10505 /* Translate from the 4-bit stage 2 representation of 10506 * memory attributes (without cache-allocation hints) to 10507 * the 8-bit representation of the stage 1 MAIR registers 10508 * (which includes allocation hints). 10509 * 10510 * ref: shared/translation/attrs/S2AttrDecode() 10511 * .../S2ConvertAttrsHints() 10512 */ 10513 static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs) 10514 { 10515 uint8_t hiattr = extract32(s2attrs, 2, 2); 10516 uint8_t loattr = extract32(s2attrs, 0, 2); 10517 uint8_t hihint = 0, lohint = 0; 10518 10519 if (hiattr != 0) { /* normal memory */ 10520 if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */ 10521 hiattr = loattr = 1; /* non-cacheable */ 10522 } else { 10523 if (hiattr != 1) { /* Write-through or write-back */ 10524 hihint = 3; /* RW allocate */ 10525 } 10526 if (loattr != 1) { /* Write-through or write-back */ 10527 lohint = 3; /* RW allocate */ 10528 } 10529 } 10530 } 10531 10532 return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint; 10533 } 10534 #endif /* !CONFIG_USER_ONLY */ 10535 10536 static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx) 10537 { 10538 if (regime_has_2_ranges(mmu_idx)) { 10539 return extract64(tcr, 37, 2); 10540 } else if (mmu_idx == ARMMMUIdx_Stage2) { 10541 return 0; /* VTCR_EL2 */ 10542 } else { 10543 /* Replicate the single TBI bit so we always have 2 bits. 
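 * Multiplying the single bit by 3 yields 0b00 or 0b11, matching the
 * two-bit {TBI1,TBI0} value returned for regimes with two VA ranges.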
*/ 10544 return extract32(tcr, 20, 1) * 3; 10545 } 10546 } 10547 10548 static int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx) 10549 { 10550 if (regime_has_2_ranges(mmu_idx)) { 10551 return extract64(tcr, 51, 2); 10552 } else if (mmu_idx == ARMMMUIdx_Stage2) { 10553 return 0; /* VTCR_EL2 */ 10554 } else { 10555 /* Replicate the single TBID bit so we always have 2 bits. */ 10556 return extract32(tcr, 29, 1) * 3; 10557 } 10558 } 10559 10560 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, 10561 ARMMMUIdx mmu_idx, bool data) 10562 { 10563 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; 10564 bool epd, hpd, using16k, using64k; 10565 int select, tsz, tbi; 10566 10567 if (!regime_has_2_ranges(mmu_idx)) { 10568 select = 0; 10569 tsz = extract32(tcr, 0, 6); 10570 using64k = extract32(tcr, 14, 1); 10571 using16k = extract32(tcr, 15, 1); 10572 if (mmu_idx == ARMMMUIdx_Stage2) { 10573 /* VTCR_EL2 */ 10574 hpd = false; 10575 } else { 10576 hpd = extract32(tcr, 24, 1); 10577 } 10578 epd = false; 10579 } else { 10580 /* 10581 * Bit 55 is always between the two regions, and is canonical for 10582 * determining if address tagging is enabled. 10583 */ 10584 select = extract64(va, 55, 1); 10585 if (!select) { 10586 tsz = extract32(tcr, 0, 6); 10587 epd = extract32(tcr, 7, 1); 10588 using64k = extract32(tcr, 14, 1); 10589 using16k = extract32(tcr, 15, 1); 10590 hpd = extract64(tcr, 41, 1); 10591 } else { 10592 int tg = extract32(tcr, 30, 2); 10593 using16k = tg == 1; 10594 using64k = tg == 3; 10595 tsz = extract32(tcr, 16, 6); 10596 epd = extract32(tcr, 23, 1); 10597 hpd = extract64(tcr, 42, 1); 10598 } 10599 } 10600 tsz = MIN(tsz, 39); /* TODO: ARMv8.4-TTST */ 10601 tsz = MAX(tsz, 16); /* TODO: ARMv8.2-LVA */ 10602 10603 /* Present TBI as a composite with TBID. */ 10604 tbi = aa64_va_parameter_tbi(tcr, mmu_idx); 10605 if (!data) { 10606 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx); 10607 } 10608 tbi = (tbi >> select) & 1; 10609 10610 return (ARMVAParameters) { 10611 .tsz = tsz, 10612 .select = select, 10613 .tbi = tbi, 10614 .epd = epd, 10615 .hpd = hpd, 10616 .using16k = using16k, 10617 .using64k = using64k, 10618 }; 10619 } 10620 10621 #ifndef CONFIG_USER_ONLY 10622 static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va, 10623 ARMMMUIdx mmu_idx) 10624 { 10625 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; 10626 uint32_t el = regime_el(env, mmu_idx); 10627 int select, tsz; 10628 bool epd, hpd; 10629 10630 if (mmu_idx == ARMMMUIdx_Stage2) { 10631 /* VTCR */ 10632 bool sext = extract32(tcr, 4, 1); 10633 bool sign = extract32(tcr, 3, 1); 10634 10635 /* 10636 * If the sign-extend bit is not the same as t0sz[3], the result 10637 * is unpredictable. Flag this as a guest error. 10638 */ 10639 if (sign != sext) { 10640 qemu_log_mask(LOG_GUEST_ERROR, 10641 "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n"); 10642 } 10643 tsz = sextract32(tcr, 0, 4) + 8; 10644 select = 0; 10645 hpd = false; 10646 epd = false; 10647 } else if (el == 2) { 10648 /* HTCR */ 10649 tsz = extract32(tcr, 0, 3); 10650 select = 0; 10651 hpd = extract64(tcr, 24, 1); 10652 epd = false; 10653 } else { 10654 int t0sz = extract32(tcr, 0, 3); 10655 int t1sz = extract32(tcr, 16, 3); 10656 10657 if (t1sz == 0) { 10658 select = va > (0xffffffffu >> t0sz); 10659 } else { 10660 /* Note that we will detect errors later. 
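 * The TTBR1 region is selected when the top t1sz bits of the VA are all
 * ones; ~(0xffffffffu >> t1sz) is the lowest such address.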
*/ 10661 select = va >= ~(0xffffffffu >> t1sz); 10662 } 10663 if (!select) { 10664 tsz = t0sz; 10665 epd = extract32(tcr, 7, 1); 10666 hpd = extract64(tcr, 41, 1); 10667 } else { 10668 tsz = t1sz; 10669 epd = extract32(tcr, 23, 1); 10670 hpd = extract64(tcr, 42, 1); 10671 } 10672 /* For aarch32, hpd0 is not enabled without t2e as well. */ 10673 hpd &= extract32(tcr, 6, 1); 10674 } 10675 10676 return (ARMVAParameters) { 10677 .tsz = tsz, 10678 .select = select, 10679 .epd = epd, 10680 .hpd = hpd, 10681 }; 10682 } 10683 10684 /** 10685 * get_phys_addr_lpae: perform one stage of page table walk, LPAE format 10686 * 10687 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs, 10688 * prot and page_size may not be filled in, and the populated fsr value provides 10689 * information on why the translation aborted, in the format of a long-format 10690 * DFSR/IFSR fault register, with the following caveats: 10691 * * the WnR bit is never set (the caller must do this). 10692 * 10693 * @env: CPUARMState 10694 * @address: virtual address to get physical address for 10695 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH 10696 * @mmu_idx: MMU index indicating required translation regime 10697 * @s1_is_el0: if @mmu_idx is ARMMMUIdx_Stage2 (so this is a stage 2 page table 10698 * walk), must be true if this is stage 2 of a stage 1+2 walk for an 10699 * EL0 access). If @mmu_idx is anything else, @s1_is_el0 is ignored. 10700 * @phys_ptr: set to the physical address corresponding to the virtual address 10701 * @attrs: set to the memory transaction attributes to use 10702 * @prot: set to the permissions for the page containing phys_ptr 10703 * @page_size_ptr: set to the size of the page containing phys_ptr 10704 * @fi: set to fault info if the translation fails 10705 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes 10706 */ 10707 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, 10708 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10709 bool s1_is_el0, 10710 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, 10711 target_ulong *page_size_ptr, 10712 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) 10713 { 10714 ARMCPU *cpu = env_archcpu(env); 10715 CPUState *cs = CPU(cpu); 10716 /* Read an LPAE long-descriptor translation table. */ 10717 ARMFaultType fault_type = ARMFault_Translation; 10718 uint32_t level; 10719 ARMVAParameters param; 10720 uint64_t ttbr; 10721 hwaddr descaddr, indexmask, indexmask_grainsize; 10722 uint32_t tableattrs; 10723 target_ulong page_size; 10724 uint32_t attrs; 10725 int32_t stride; 10726 int addrsize, inputsize; 10727 TCR *tcr = regime_tcr(env, mmu_idx); 10728 int ap, ns, xn, pxn; 10729 uint32_t el = regime_el(env, mmu_idx); 10730 uint64_t descaddrmask; 10731 bool aarch64 = arm_el_is_aa64(env, el); 10732 bool guarded = false; 10733 10734 /* TODO: This code does not support shareability levels. */ 10735 if (aarch64) { 10736 param = aa64_va_parameters(env, address, mmu_idx, 10737 access_type != MMU_INST_FETCH); 10738 level = 0; 10739 addrsize = 64 - 8 * param.tbi; 10740 inputsize = 64 - param.tsz; 10741 } else { 10742 param = aa32_va_parameters(env, address, mmu_idx); 10743 level = 1; 10744 addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32); 10745 inputsize = addrsize - param.tsz; 10746 } 10747 10748 /* 10749 * We determined the region when collecting the parameters, but we 10750 * have not yet validated that the address is valid for the region. 
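 * An address outside its selected region takes a Translation fault: for
 * example, with tsz == 16 and no top-byte-ignore, VA[63:48] must be all
 * zeros for the low region and all ones for the high region.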
10751 * Extract the top bits and verify that they all match select. 10752 * 10753 * For aa32, if inputsize == addrsize, then we have selected the 10754 * region by exclusion in aa32_va_parameters and there is no more 10755 * validation to do here. 10756 */ 10757 if (inputsize < addrsize) { 10758 target_ulong top_bits = sextract64(address, inputsize, 10759 addrsize - inputsize); 10760 if (-top_bits != param.select) { 10761 /* The gap between the two regions is a Translation fault */ 10762 fault_type = ARMFault_Translation; 10763 goto do_fault; 10764 } 10765 } 10766 10767 if (param.using64k) { 10768 stride = 13; 10769 } else if (param.using16k) { 10770 stride = 11; 10771 } else { 10772 stride = 9; 10773 } 10774 10775 /* Note that QEMU ignores shareability and cacheability attributes, 10776 * so we don't need to do anything with the SH, ORGN, IRGN fields 10777 * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the 10778 * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently 10779 * implement any ASID-like capability so we can ignore it (instead 10780 * we will always flush the TLB any time the ASID is changed). 10781 */ 10782 ttbr = regime_ttbr(env, mmu_idx, param.select); 10783 10784 /* Here we should have set up all the parameters for the translation: 10785 * inputsize, ttbr, epd, stride, tbi 10786 */ 10787 10788 if (param.epd) { 10789 /* Translation table walk disabled => Translation fault on TLB miss 10790 * Note: This is always 0 on 64-bit EL2 and EL3. 10791 */ 10792 goto do_fault; 10793 } 10794 10795 if (mmu_idx != ARMMMUIdx_Stage2) { 10796 /* The starting level depends on the virtual address size (which can 10797 * be up to 48 bits) and the translation granule size. It indicates 10798 * the number of strides (stride bits at a time) needed to 10799 * consume the bits of the input address. In the pseudocode this is: 10800 * level = 4 - RoundUp((inputsize - grainsize) / stride) 10801 * where their 'inputsize' is our 'inputsize', 'grainsize' is 10802 * our 'stride + 3' and 'stride' is our 'stride'. 10803 * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying: 10804 * = 4 - (inputsize - stride - 3 + stride - 1) / stride 10805 * = 4 - (inputsize - 4) / stride; 10806 */ 10807 level = 4 - (inputsize - 4) / stride; 10808 } else { 10809 /* For stage 2 translations the starting level is specified by the 10810 * VTCR_EL2.SL0 field (whose interpretation depends on the page size) 10811 */ 10812 uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2); 10813 uint32_t startlevel; 10814 bool ok; 10815 10816 if (!aarch64 || stride == 9) { 10817 /* AArch32 or 4KB pages */ 10818 startlevel = 2 - sl0; 10819 } else { 10820 /* 16KB or 64KB pages */ 10821 startlevel = 3 - sl0; 10822 } 10823 10824 /* Check that the starting level is valid. */ 10825 ok = check_s2_mmu_setup(cpu, aarch64, startlevel, 10826 inputsize, stride); 10827 if (!ok) { 10828 fault_type = ARMFault_Translation; 10829 goto do_fault; 10830 } 10831 level = startlevel; 10832 } 10833 10834 indexmask_grainsize = (1ULL << (stride + 3)) - 1; 10835 indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1; 10836 10837 /* Now we can extract the actual base address from the TTBR */ 10838 descaddr = extract64(ttbr, 0, 48); 10839 /* 10840 * We rely on this masking to clear the RES0 bits at the bottom of the TTBR 10841 * and also to mask out CnP (bit 0) which could validly be non-zero. 
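 * (For example, with a 4KB granule and a 48-bit input region the walk
 * starts at level 0 with a 512-entry (4KB) top-level table, so indexmask
 * covers bits [11:0] and this clears TTBR[11:0].)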
10842 */ 10843 descaddr &= ~indexmask; 10844 10845 /* The address field in the descriptor goes up to bit 39 for ARMv7 10846 * but up to bit 47 for ARMv8, but we use the descaddrmask 10847 * up to bit 39 for AArch32, because we don't need other bits in that case 10848 * to construct next descriptor address (anyway they should be all zeroes). 10849 */ 10850 descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) & 10851 ~indexmask_grainsize; 10852 10853 /* Secure accesses start with the page table in secure memory and 10854 * can be downgraded to non-secure at any step. Non-secure accesses 10855 * remain non-secure. We implement this by just ORing in the NSTable/NS 10856 * bits at each step. 10857 */ 10858 tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4); 10859 for (;;) { 10860 uint64_t descriptor; 10861 bool nstable; 10862 10863 descaddr |= (address >> (stride * (4 - level))) & indexmask; 10864 descaddr &= ~7ULL; 10865 nstable = extract32(tableattrs, 4, 1); 10866 descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi); 10867 if (fi->type != ARMFault_None) { 10868 goto do_fault; 10869 } 10870 10871 if (!(descriptor & 1) || 10872 (!(descriptor & 2) && (level == 3))) { 10873 /* Invalid, or the Reserved level 3 encoding */ 10874 goto do_fault; 10875 } 10876 descaddr = descriptor & descaddrmask; 10877 10878 if ((descriptor & 2) && (level < 3)) { 10879 /* Table entry. The top five bits are attributes which may 10880 * propagate down through lower levels of the table (and 10881 * which are all arranged so that 0 means "no effect", so 10882 * we can gather them up by ORing in the bits at each level). 10883 */ 10884 tableattrs |= extract64(descriptor, 59, 5); 10885 level++; 10886 indexmask = indexmask_grainsize; 10887 continue; 10888 } 10889 /* Block entry at level 1 or 2, or page entry at level 3. 10890 * These are basically the same thing, although the number 10891 * of bits we pull in from the vaddr varies. 10892 */ 10893 page_size = (1ULL << ((stride * (4 - level)) + 3)); 10894 descaddr |= (address & (page_size - 1)); 10895 /* Extract attributes from the descriptor */ 10896 attrs = extract64(descriptor, 2, 10) 10897 | (extract64(descriptor, 52, 12) << 10); 10898 10899 if (mmu_idx == ARMMMUIdx_Stage2) { 10900 /* Stage 2 table descriptors do not include any attribute fields */ 10901 break; 10902 } 10903 /* Merge in attributes from table descriptors */ 10904 attrs |= nstable << 3; /* NS */ 10905 guarded = extract64(descriptor, 50, 1); /* GP */ 10906 if (param.hpd) { 10907 /* HPD disables all the table attributes except NSTable. */ 10908 break; 10909 } 10910 attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */ 10911 /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1 10912 * means "force PL1 access only", which means forcing AP[1] to 0. 10913 */ 10914 attrs &= ~(extract32(tableattrs, 2, 1) << 4); /* !APT[0] => AP[1] */ 10915 attrs |= extract32(tableattrs, 3, 1) << 5; /* APT[1] => AP[2] */ 10916 break; 10917 } 10918 /* Here descaddr is the final physical address, and attributes 10919 * are all in attrs. 
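 * attrs holds descriptor bits [11:2] in its bits [9:0] and descriptor
 * bits [63:52] in its bits [21:10]: for instance the AF is attrs bit 8,
 * AP[2:1] is attrs bits [5:4], and (for stage 1) PXN and XN are attrs
 * bits 11 and 12.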
10920 */ 10921 fault_type = ARMFault_AccessFlag; 10922 if ((attrs & (1 << 8)) == 0) { 10923 /* Access flag */ 10924 goto do_fault; 10925 } 10926 10927 ap = extract32(attrs, 4, 2); 10928 10929 if (mmu_idx == ARMMMUIdx_Stage2) { 10930 ns = true; 10931 xn = extract32(attrs, 11, 2); 10932 *prot = get_S2prot(env, ap, xn, s1_is_el0); 10933 } else { 10934 ns = extract32(attrs, 3, 1); 10935 xn = extract32(attrs, 12, 1); 10936 pxn = extract32(attrs, 11, 1); 10937 *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn); 10938 } 10939 10940 fault_type = ARMFault_Permission; 10941 if (!(*prot & (1 << access_type))) { 10942 goto do_fault; 10943 } 10944 10945 if (ns) { 10946 /* The NS bit will (as required by the architecture) have no effect if 10947 * the CPU doesn't support TZ or this is a non-secure translation 10948 * regime, because the attribute will already be non-secure. 10949 */ 10950 txattrs->secure = false; 10951 } 10952 /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */ 10953 if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) { 10954 txattrs->target_tlb_bit0 = true; 10955 } 10956 10957 if (cacheattrs != NULL) { 10958 if (mmu_idx == ARMMMUIdx_Stage2) { 10959 cacheattrs->attrs = convert_stage2_attrs(env, 10960 extract32(attrs, 0, 4)); 10961 } else { 10962 /* Index into MAIR registers for cache attributes */ 10963 uint8_t attrindx = extract32(attrs, 0, 3); 10964 uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; 10965 assert(attrindx <= 7); 10966 cacheattrs->attrs = extract64(mair, attrindx * 8, 8); 10967 } 10968 cacheattrs->shareability = extract32(attrs, 6, 2); 10969 } 10970 10971 *phys_ptr = descaddr; 10972 *page_size_ptr = page_size; 10973 return false; 10974 10975 do_fault: 10976 fi->type = fault_type; 10977 fi->level = level; 10978 /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */ 10979 fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2); 10980 return true; 10981 } 10982 10983 static inline void get_phys_addr_pmsav7_default(CPUARMState *env, 10984 ARMMMUIdx mmu_idx, 10985 int32_t address, int *prot) 10986 { 10987 if (!arm_feature(env, ARM_FEATURE_M)) { 10988 *prot = PAGE_READ | PAGE_WRITE; 10989 switch (address) { 10990 case 0xF0000000 ... 0xFFFFFFFF: 10991 if (regime_sctlr(env, mmu_idx) & SCTLR_V) { 10992 /* hivecs execing is ok */ 10993 *prot |= PAGE_EXEC; 10994 } 10995 break; 10996 case 0x00000000 ... 0x7FFFFFFF: 10997 *prot |= PAGE_EXEC; 10998 break; 10999 } 11000 } else { 11001 /* Default system address map for M profile cores. 11002 * The architecture specifies which regions are execute-never; 11003 * at the MPU level no other checks are defined. 11004 */ 11005 switch (address) { 11006 case 0x00000000 ... 0x1fffffff: /* ROM */ 11007 case 0x20000000 ... 0x3fffffff: /* SRAM */ 11008 case 0x60000000 ... 0x7fffffff: /* RAM */ 11009 case 0x80000000 ... 0x9fffffff: /* RAM */ 11010 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 11011 break; 11012 case 0x40000000 ... 0x5fffffff: /* Peripheral */ 11013 case 0xa0000000 ... 0xbfffffff: /* Device */ 11014 case 0xc0000000 ... 0xdfffffff: /* Device */ 11015 case 0xe0000000 ... 0xffffffff: /* System */ 11016 *prot = PAGE_READ | PAGE_WRITE; 11017 break; 11018 default: 11019 g_assert_not_reached(); 11020 } 11021 } 11022 } 11023 11024 static bool pmsav7_use_background_region(ARMCPU *cpu, 11025 ARMMMUIdx mmu_idx, bool is_user) 11026 { 11027 /* Return true if we should use the default memory map as a 11028 * "background" region if there are no hits against any MPU regions. 
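 * This is only ever true for privileged accesses: it is controlled by
 * MPU_CTRL.PRIVDEFENA for M profile and by SCTLR.BR otherwise.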
11029 */ 11030 CPUARMState *env = &cpu->env; 11031 11032 if (is_user) { 11033 return false; 11034 } 11035 11036 if (arm_feature(env, ARM_FEATURE_M)) { 11037 return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] 11038 & R_V7M_MPU_CTRL_PRIVDEFENA_MASK; 11039 } else { 11040 return regime_sctlr(env, mmu_idx) & SCTLR_BR; 11041 } 11042 } 11043 11044 static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address) 11045 { 11046 /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */ 11047 return arm_feature(env, ARM_FEATURE_M) && 11048 extract32(address, 20, 12) == 0xe00; 11049 } 11050 11051 static inline bool m_is_system_region(CPUARMState *env, uint32_t address) 11052 { 11053 /* True if address is in the M profile system region 11054 * 0xe0000000 - 0xffffffff 11055 */ 11056 return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7; 11057 } 11058 11059 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, 11060 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11061 hwaddr *phys_ptr, int *prot, 11062 target_ulong *page_size, 11063 ARMMMUFaultInfo *fi) 11064 { 11065 ARMCPU *cpu = env_archcpu(env); 11066 int n; 11067 bool is_user = regime_is_user(env, mmu_idx); 11068 11069 *phys_ptr = address; 11070 *page_size = TARGET_PAGE_SIZE; 11071 *prot = 0; 11072 11073 if (regime_translation_disabled(env, mmu_idx) || 11074 m_is_ppb_region(env, address)) { 11075 /* MPU disabled or M profile PPB access: use default memory map. 11076 * The other case which uses the default memory map in the 11077 * v7M ARM ARM pseudocode is exception vector reads from the vector 11078 * table. In QEMU those accesses are done in arm_v7m_load_vector(), 11079 * which always does a direct read using address_space_ldl(), rather 11080 * than going via this function, so we don't need to check that here. 11081 */ 11082 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 11083 } else { /* MPU enabled */ 11084 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { 11085 /* region search */ 11086 uint32_t base = env->pmsav7.drbar[n]; 11087 uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5); 11088 uint32_t rmask; 11089 bool srdis = false; 11090 11091 if (!(env->pmsav7.drsr[n] & 0x1)) { 11092 continue; 11093 } 11094 11095 if (!rsize) { 11096 qemu_log_mask(LOG_GUEST_ERROR, 11097 "DRSR[%d]: Rsize field cannot be 0\n", n); 11098 continue; 11099 } 11100 rsize++; 11101 rmask = (1ull << rsize) - 1; 11102 11103 if (base & rmask) { 11104 qemu_log_mask(LOG_GUEST_ERROR, 11105 "DRBAR[%d]: 0x%" PRIx32 " misaligned " 11106 "to DRSR region size, mask = 0x%" PRIx32 "\n", 11107 n, base, rmask); 11108 continue; 11109 } 11110 11111 if (address < base || address > base + rmask) { 11112 /* 11113 * Address not in this region. We must check whether the 11114 * region covers addresses in the same page as our address. 11115 * In that case we must not report a size that covers the 11116 * whole page for a subsequent hit against a different MPU 11117 * region or the background region, because it would result in 11118 * incorrect TLB hits for subsequent accesses to addresses that 11119 * are in this MPU region. 
11120 */ 11121 if (ranges_overlap(base, rmask, 11122 address & TARGET_PAGE_MASK, 11123 TARGET_PAGE_SIZE)) { 11124 *page_size = 1; 11125 } 11126 continue; 11127 } 11128 11129 /* Region matched */ 11130 11131 if (rsize >= 8) { /* no subregions for regions < 256 bytes */ 11132 int i, snd; 11133 uint32_t srdis_mask; 11134 11135 rsize -= 3; /* sub region size (power of 2) */ 11136 snd = ((address - base) >> rsize) & 0x7; 11137 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1); 11138 11139 srdis_mask = srdis ? 0x3 : 0x0; 11140 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) { 11141 /* This will check in groups of 2, 4 and then 8, whether 11142 * the subregion bits are consistent. rsize is incremented 11143 * back up to give the region size, considering consistent 11144 * adjacent subregions as one region. Stop testing if rsize 11145 * is already big enough for an entire QEMU page. 11146 */ 11147 int snd_rounded = snd & ~(i - 1); 11148 uint32_t srdis_multi = extract32(env->pmsav7.drsr[n], 11149 snd_rounded + 8, i); 11150 if (srdis_mask ^ srdis_multi) { 11151 break; 11152 } 11153 srdis_mask = (srdis_mask << i) | srdis_mask; 11154 rsize++; 11155 } 11156 } 11157 if (srdis) { 11158 continue; 11159 } 11160 if (rsize < TARGET_PAGE_BITS) { 11161 *page_size = 1 << rsize; 11162 } 11163 break; 11164 } 11165 11166 if (n == -1) { /* no hits */ 11167 if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) { 11168 /* background fault */ 11169 fi->type = ARMFault_Background; 11170 return true; 11171 } 11172 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 11173 } else { /* a MPU hit! */ 11174 uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3); 11175 uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1); 11176 11177 if (m_is_system_region(env, address)) { 11178 /* System space is always execute never */ 11179 xn = 1; 11180 } 11181 11182 if (is_user) { /* User mode AP bit decoding */ 11183 switch (ap) { 11184 case 0: 11185 case 1: 11186 case 5: 11187 break; /* no access */ 11188 case 3: 11189 *prot |= PAGE_WRITE; 11190 /* fall through */ 11191 case 2: 11192 case 6: 11193 *prot |= PAGE_READ | PAGE_EXEC; 11194 break; 11195 case 7: 11196 /* for v7M, same as 6; for R profile a reserved value */ 11197 if (arm_feature(env, ARM_FEATURE_M)) { 11198 *prot |= PAGE_READ | PAGE_EXEC; 11199 break; 11200 } 11201 /* fall through */ 11202 default: 11203 qemu_log_mask(LOG_GUEST_ERROR, 11204 "DRACR[%d]: Bad value for AP bits: 0x%" 11205 PRIx32 "\n", n, ap); 11206 } 11207 } else { /* Priv. 
mode AP bits decoding */ 11208 switch (ap) { 11209 case 0: 11210 break; /* no access */ 11211 case 1: 11212 case 2: 11213 case 3: 11214 *prot |= PAGE_WRITE; 11215 /* fall through */ 11216 case 5: 11217 case 6: 11218 *prot |= PAGE_READ | PAGE_EXEC; 11219 break; 11220 case 7: 11221 /* for v7M, same as 6; for R profile a reserved value */ 11222 if (arm_feature(env, ARM_FEATURE_M)) { 11223 *prot |= PAGE_READ | PAGE_EXEC; 11224 break; 11225 } 11226 /* fall through */ 11227 default: 11228 qemu_log_mask(LOG_GUEST_ERROR, 11229 "DRACR[%d]: Bad value for AP bits: 0x%" 11230 PRIx32 "\n", n, ap); 11231 } 11232 } 11233 11234 /* execute never */ 11235 if (xn) { 11236 *prot &= ~PAGE_EXEC; 11237 } 11238 } 11239 } 11240 11241 fi->type = ARMFault_Permission; 11242 fi->level = 1; 11243 return !(*prot & (1 << access_type)); 11244 } 11245 11246 static bool v8m_is_sau_exempt(CPUARMState *env, 11247 uint32_t address, MMUAccessType access_type) 11248 { 11249 /* The architecture specifies that certain address ranges are 11250 * exempt from v8M SAU/IDAU checks. 11251 */ 11252 return 11253 (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) || 11254 (address >= 0xe0000000 && address <= 0xe0002fff) || 11255 (address >= 0xe000e000 && address <= 0xe000efff) || 11256 (address >= 0xe002e000 && address <= 0xe002efff) || 11257 (address >= 0xe0040000 && address <= 0xe0041fff) || 11258 (address >= 0xe00ff000 && address <= 0xe00fffff); 11259 } 11260 11261 void v8m_security_lookup(CPUARMState *env, uint32_t address, 11262 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11263 V8M_SAttributes *sattrs) 11264 { 11265 /* Look up the security attributes for this address. Compare the 11266 * pseudocode SecurityCheck() function. 11267 * We assume the caller has zero-initialized *sattrs. 11268 */ 11269 ARMCPU *cpu = env_archcpu(env); 11270 int r; 11271 bool idau_exempt = false, idau_ns = true, idau_nsc = true; 11272 int idau_region = IREGION_NOTVALID; 11273 uint32_t addr_page_base = address & TARGET_PAGE_MASK; 11274 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 11275 11276 if (cpu->idau) { 11277 IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau); 11278 IDAUInterface *ii = IDAU_INTERFACE(cpu->idau); 11279 11280 iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns, 11281 &idau_nsc); 11282 } 11283 11284 if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) { 11285 /* 0xf0000000..0xffffffff is always S for insn fetches */ 11286 return; 11287 } 11288 11289 if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) { 11290 sattrs->ns = !regime_is_secure(env, mmu_idx); 11291 return; 11292 } 11293 11294 if (idau_region != IREGION_NOTVALID) { 11295 sattrs->irvalid = true; 11296 sattrs->iregion = idau_region; 11297 } 11298 11299 switch (env->sau.ctrl & 3) { 11300 case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */ 11301 break; 11302 case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */ 11303 sattrs->ns = true; 11304 break; 11305 default: /* SAU.ENABLE == 1 */ 11306 for (r = 0; r < cpu->sau_sregion; r++) { 11307 if (env->sau.rlar[r] & 1) { 11308 uint32_t base = env->sau.rbar[r] & ~0x1f; 11309 uint32_t limit = env->sau.rlar[r] | 0x1f; 11310 11311 if (base <= address && limit >= address) { 11312 if (base > addr_page_base || limit < addr_page_limit) { 11313 sattrs->subpage = true; 11314 } 11315 if (sattrs->srvalid) { 11316 /* If we hit in more than one region then we must report 11317 * as Secure, not NS-Callable, with no valid region 11318 * number info. 
11319 */ 11320 sattrs->ns = false; 11321 sattrs->nsc = false; 11322 sattrs->sregion = 0; 11323 sattrs->srvalid = false; 11324 break; 11325 } else { 11326 if (env->sau.rlar[r] & 2) { 11327 sattrs->nsc = true; 11328 } else { 11329 sattrs->ns = true; 11330 } 11331 sattrs->srvalid = true; 11332 sattrs->sregion = r; 11333 } 11334 } else { 11335 /* 11336 * Address not in this region. We must check whether the 11337 * region covers addresses in the same page as our address. 11338 * In that case we must not report a size that covers the 11339 * whole page for a subsequent hit against a different MPU 11340 * region or the background region, because it would result 11341 * in incorrect TLB hits for subsequent accesses to 11342 * addresses that are in this MPU region. 11343 */ 11344 if (limit >= base && 11345 ranges_overlap(base, limit - base + 1, 11346 addr_page_base, 11347 TARGET_PAGE_SIZE)) { 11348 sattrs->subpage = true; 11349 } 11350 } 11351 } 11352 } 11353 break; 11354 } 11355 11356 /* 11357 * The IDAU will override the SAU lookup results if it specifies 11358 * higher security than the SAU does. 11359 */ 11360 if (!idau_ns) { 11361 if (sattrs->ns || (!idau_nsc && sattrs->nsc)) { 11362 sattrs->ns = false; 11363 sattrs->nsc = idau_nsc; 11364 } 11365 } 11366 } 11367 11368 bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, 11369 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11370 hwaddr *phys_ptr, MemTxAttrs *txattrs, 11371 int *prot, bool *is_subpage, 11372 ARMMMUFaultInfo *fi, uint32_t *mregion) 11373 { 11374 /* Perform a PMSAv8 MPU lookup (without also doing the SAU check 11375 * that a full phys-to-virt translation does). 11376 * mregion is (if not NULL) set to the region number which matched, 11377 * or -1 if no region number is returned (MPU off, address did not 11378 * hit a region, address hit in multiple regions). 11379 * We set is_subpage to true if the region hit doesn't cover the 11380 * entire TARGET_PAGE the address is within. 11381 */ 11382 ARMCPU *cpu = env_archcpu(env); 11383 bool is_user = regime_is_user(env, mmu_idx); 11384 uint32_t secure = regime_is_secure(env, mmu_idx); 11385 int n; 11386 int matchregion = -1; 11387 bool hit = false; 11388 uint32_t addr_page_base = address & TARGET_PAGE_MASK; 11389 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 11390 11391 *is_subpage = false; 11392 *phys_ptr = address; 11393 *prot = 0; 11394 if (mregion) { 11395 *mregion = -1; 11396 } 11397 11398 /* Unlike the ARM ARM pseudocode, we don't need to check whether this 11399 * was an exception vector read from the vector table (which is always 11400 * done using the default system address map), because those accesses 11401 * are done in arm_v7m_load_vector(), which always does a direct 11402 * read using address_space_ldl(), rather than going via this function. 11403 */ 11404 if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */ 11405 hit = true; 11406 } else if (m_is_ppb_region(env, address)) { 11407 hit = true; 11408 } else { 11409 if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) { 11410 hit = true; 11411 } 11412 11413 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { 11414 /* region search */ 11415 /* Note that the base address is bits [31:5] from the register 11416 * with bits [4:0] all zeroes, but the limit address is bits 11417 * [31:5] from the register with bits [4:0] all ones. 
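 * This gives PMSAv8 regions a 32-byte granularity for both the base
 * and the limit.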
11418 */ 11419 uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f; 11420 uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f; 11421 11422 if (!(env->pmsav8.rlar[secure][n] & 0x1)) { 11423 /* Region disabled */ 11424 continue; 11425 } 11426 11427 if (address < base || address > limit) { 11428 /* 11429 * Address not in this region. We must check whether the 11430 * region covers addresses in the same page as our address. 11431 * In that case we must not report a size that covers the 11432 * whole page for a subsequent hit against a different MPU 11433 * region or the background region, because it would result in 11434 * incorrect TLB hits for subsequent accesses to addresses that 11435 * are in this MPU region. 11436 */ 11437 if (limit >= base && 11438 ranges_overlap(base, limit - base + 1, 11439 addr_page_base, 11440 TARGET_PAGE_SIZE)) { 11441 *is_subpage = true; 11442 } 11443 continue; 11444 } 11445 11446 if (base > addr_page_base || limit < addr_page_limit) { 11447 *is_subpage = true; 11448 } 11449 11450 if (matchregion != -1) { 11451 /* Multiple regions match -- always a failure (unlike 11452 * PMSAv7 where highest-numbered-region wins) 11453 */ 11454 fi->type = ARMFault_Permission; 11455 fi->level = 1; 11456 return true; 11457 } 11458 11459 matchregion = n; 11460 hit = true; 11461 } 11462 } 11463 11464 if (!hit) { 11465 /* background fault */ 11466 fi->type = ARMFault_Background; 11467 return true; 11468 } 11469 11470 if (matchregion == -1) { 11471 /* hit using the background region */ 11472 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 11473 } else { 11474 uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2); 11475 uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1); 11476 11477 if (m_is_system_region(env, address)) { 11478 /* System space is always execute never */ 11479 xn = 1; 11480 } 11481 11482 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap); 11483 if (*prot && !xn) { 11484 *prot |= PAGE_EXEC; 11485 } 11486 /* We don't need to look the attribute up in the MAIR0/MAIR1 11487 * registers because that only tells us about cacheability. 11488 */ 11489 if (mregion) { 11490 *mregion = matchregion; 11491 } 11492 } 11493 11494 fi->type = ARMFault_Permission; 11495 fi->level = 1; 11496 return !(*prot & (1 << access_type)); 11497 } 11498 11499 11500 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, 11501 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11502 hwaddr *phys_ptr, MemTxAttrs *txattrs, 11503 int *prot, target_ulong *page_size, 11504 ARMMMUFaultInfo *fi) 11505 { 11506 uint32_t secure = regime_is_secure(env, mmu_idx); 11507 V8M_SAttributes sattrs = {}; 11508 bool ret; 11509 bool mpu_is_subpage; 11510 11511 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 11512 v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs); 11513 if (access_type == MMU_INST_FETCH) { 11514 /* Instruction fetches always use the MMU bank and the 11515 * transaction attribute determined by the fetch address, 11516 * regardless of CPU state. This is painful for QEMU 11517 * to handle, because it would mean we need to encode 11518 * into the mmu_idx not just the (user, negpri) information 11519 * for the current security state but also that for the 11520 * other security state, which would balloon the number 11521 * of mmu_idx values needed alarmingly. 
11522 * Fortunately we can avoid this because it's not actually 11523 * possible to arbitrarily execute code from memory with 11524 * the wrong security attribute: it will always generate 11525 * an exception of some kind or another, apart from the 11526 * special case of an NS CPU executing an SG instruction 11527 * in S&NSC memory. So we always just fail the translation 11528 * here and sort things out in the exception handler 11529 * (including possibly emulating an SG instruction). 11530 */ 11531 if (sattrs.ns != !secure) { 11532 if (sattrs.nsc) { 11533 fi->type = ARMFault_QEMU_NSCExec; 11534 } else { 11535 fi->type = ARMFault_QEMU_SFault; 11536 } 11537 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; 11538 *phys_ptr = address; 11539 *prot = 0; 11540 return true; 11541 } 11542 } else { 11543 /* For data accesses we always use the MMU bank indicated 11544 * by the current CPU state, but the security attributes 11545 * might downgrade a secure access to nonsecure. 11546 */ 11547 if (sattrs.ns) { 11548 txattrs->secure = false; 11549 } else if (!secure) { 11550 /* NS access to S memory must fault. 11551 * Architecturally we should first check whether the 11552 * MPU information for this address indicates that we 11553 * are doing an unaligned access to Device memory, which 11554 * should generate a UsageFault instead. QEMU does not 11555 * currently check for that kind of unaligned access though. 11556 * If we added it we would need to do so as a special case 11557 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt(). 11558 */ 11559 fi->type = ARMFault_QEMU_SFault; 11560 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; 11561 *phys_ptr = address; 11562 *prot = 0; 11563 return true; 11564 } 11565 } 11566 } 11567 11568 ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr, 11569 txattrs, prot, &mpu_is_subpage, fi, NULL); 11570 *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE; 11571 return ret; 11572 } 11573 11574 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address, 11575 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11576 hwaddr *phys_ptr, int *prot, 11577 ARMMMUFaultInfo *fi) 11578 { 11579 int n; 11580 uint32_t mask; 11581 uint32_t base; 11582 bool is_user = regime_is_user(env, mmu_idx); 11583 11584 if (regime_translation_disabled(env, mmu_idx)) { 11585 /* MPU disabled. */ 11586 *phys_ptr = address; 11587 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 11588 return false; 11589 } 11590 11591 *phys_ptr = address; 11592 for (n = 7; n >= 0; n--) { 11593 base = env->cp15.c6_region[n]; 11594 if ((base & 1) == 0) { 11595 continue; 11596 } 11597 mask = 1 << ((base >> 1) & 0x1f); 11598 /* Keep this shift separate from the above to avoid an 11599 (undefined) << 32. 
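For the largest encodable region size the combined shift count would be 32; splitting it keeps each shift count below 32, and the unsigned wrap in the second step still yields the intended all-ones mask.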
*/ 11600 mask = (mask << 1) - 1; 11601 if (((base ^ address) & ~mask) == 0) { 11602 break; 11603 } 11604 } 11605 if (n < 0) { 11606 fi->type = ARMFault_Background; 11607 return true; 11608 } 11609 11610 if (access_type == MMU_INST_FETCH) { 11611 mask = env->cp15.pmsav5_insn_ap; 11612 } else { 11613 mask = env->cp15.pmsav5_data_ap; 11614 } 11615 mask = (mask >> (n * 4)) & 0xf; 11616 switch (mask) { 11617 case 0: 11618 fi->type = ARMFault_Permission; 11619 fi->level = 1; 11620 return true; 11621 case 1: 11622 if (is_user) { 11623 fi->type = ARMFault_Permission; 11624 fi->level = 1; 11625 return true; 11626 } 11627 *prot = PAGE_READ | PAGE_WRITE; 11628 break; 11629 case 2: 11630 *prot = PAGE_READ; 11631 if (!is_user) { 11632 *prot |= PAGE_WRITE; 11633 } 11634 break; 11635 case 3: 11636 *prot = PAGE_READ | PAGE_WRITE; 11637 break; 11638 case 5: 11639 if (is_user) { 11640 fi->type = ARMFault_Permission; 11641 fi->level = 1; 11642 return true; 11643 } 11644 *prot = PAGE_READ; 11645 break; 11646 case 6: 11647 *prot = PAGE_READ; 11648 break; 11649 default: 11650 /* Bad permission. */ 11651 fi->type = ARMFault_Permission; 11652 fi->level = 1; 11653 return true; 11654 } 11655 *prot |= PAGE_EXEC; 11656 return false; 11657 } 11658 11659 /* Combine either inner or outer cacheability attributes for normal 11660 * memory, according to table D4-42 and pseudocode procedure 11661 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM). 11662 * 11663 * NB: only stage 1 includes allocation hints (RW bits), leading to 11664 * some asymmetry. 11665 */ 11666 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2) 11667 { 11668 if (s1 == 4 || s2 == 4) { 11669 /* non-cacheable has precedence */ 11670 return 4; 11671 } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) { 11672 /* stage 1 write-through takes precedence */ 11673 return s1; 11674 } else if (extract32(s2, 2, 2) == 2) { 11675 /* stage 2 write-through takes precedence, but the allocation hint 11676 * is still taken from stage 1 11677 */ 11678 return (2 << 2) | extract32(s1, 0, 2); 11679 } else { /* write-back */ 11680 return s1; 11681 } 11682 } 11683 11684 /* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4 11685 * and CombineS1S2Desc() 11686 * 11687 * @s1: Attributes from stage 1 walk 11688 * @s2: Attributes from stage 2 walk 11689 */ 11690 static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2) 11691 { 11692 uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4); 11693 uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4); 11694 ARMCacheAttrs ret; 11695 11696 /* Combine shareability attributes (table D4-43) */ 11697 if (s1.shareability == 2 || s2.shareability == 2) { 11698 /* if either are outer-shareable, the result is outer-shareable */ 11699 ret.shareability = 2; 11700 } else if (s1.shareability == 3 || s2.shareability == 3) { 11701 /* if either are inner-shareable, the result is inner-shareable */ 11702 ret.shareability = 3; 11703 } else { 11704 /* both non-shareable */ 11705 ret.shareability = 0; 11706 } 11707 11708 /* Combine memory type and cacheability attributes */ 11709 if (s1hi == 0 || s2hi == 0) { 11710 /* Device has precedence over normal */ 11711 if (s1lo == 0 || s2lo == 0) { 11712 /* nGnRnE has precedence over anything */ 11713 ret.attrs = 0; 11714 } else if (s1lo == 4 || s2lo == 4) { 11715 /* non-Reordering has precedence over Reordering */ 11716 ret.attrs = 4; /* nGnRE */ 11717 } else if (s1lo == 8 || s2lo == 8) { 11718 /* 
non-Gathering has precedence over Gathering */ 11719 ret.attrs = 8; /* nGRE */ 11720 } else { 11721 ret.attrs = 0xc; /* GRE */ 11722 } 11723 11724 /* Any location for which the resultant memory type is any 11725 * type of Device memory is always treated as Outer Shareable. 11726 */ 11727 ret.shareability = 2; 11728 } else { /* Normal memory */ 11729 /* Outer/inner cacheability combine independently */ 11730 ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4 11731 | combine_cacheattr_nibble(s1lo, s2lo); 11732 11733 if (ret.attrs == 0x44) { 11734 /* Any location for which the resultant memory type is Normal 11735 * Inner Non-cacheable, Outer Non-cacheable is always treated 11736 * as Outer Shareable. 11737 */ 11738 ret.shareability = 2; 11739 } 11740 } 11741 11742 return ret; 11743 } 11744 11745 11746 /* get_phys_addr - get the physical address for this virtual address 11747 * 11748 * Find the physical address corresponding to the given virtual address, 11749 * by doing a translation table walk on MMU based systems or using the 11750 * MPU state on MPU based systems. 11751 * 11752 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs, 11753 * prot and page_size may not be filled in, and the populated fsr value provides 11754 * information on why the translation aborted, in the format of a 11755 * DFSR/IFSR fault register, with the following caveats: 11756 * * we honour the short vs long DFSR format differences. 11757 * * the WnR bit is never set (the caller must do this). 11758 * * for PMSAv5 based systems we don't bother to return a full FSR format 11759 * value. 11760 * 11761 * @env: CPUARMState 11762 * @address: virtual address to get physical address for 11763 * @access_type: 0 for read, 1 for write, 2 for execute 11764 * @mmu_idx: MMU index indicating required translation regime 11765 * @phys_ptr: set to the physical address corresponding to the virtual address 11766 * @attrs: set to the memory transaction attributes to use 11767 * @prot: set to the permissions for the page containing phys_ptr 11768 * @page_size: set to the size of the page containing phys_ptr 11769 * @fi: set to fault info if the translation fails 11770 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes 11771 */ 11772 bool get_phys_addr(CPUARMState *env, target_ulong address, 11773 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11774 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 11775 target_ulong *page_size, 11776 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) 11777 { 11778 if (mmu_idx == ARMMMUIdx_E10_0 || 11779 mmu_idx == ARMMMUIdx_E10_1 || 11780 mmu_idx == ARMMMUIdx_E10_1_PAN) { 11781 /* Call ourselves recursively to do the stage 1 and then stage 2 11782 * translations. 11783 */ 11784 if (arm_feature(env, ARM_FEATURE_EL2)) { 11785 hwaddr ipa; 11786 int s2_prot; 11787 int ret; 11788 ARMCacheAttrs cacheattrs2 = {}; 11789 11790 ret = get_phys_addr(env, address, access_type, 11791 stage_1_mmu_idx(mmu_idx), &ipa, attrs, 11792 prot, page_size, fi, cacheattrs); 11793 11794 /* If S1 fails or S2 is disabled, return early. */ 11795 if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) { 11796 *phys_ptr = ipa; 11797 return ret; 11798 } 11799 11800 /* S1 is done. Now do S2 translation. */ 11801 ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_Stage2, 11802 mmu_idx == ARMMMUIdx_E10_0, 11803 phys_ptr, attrs, &s2_prot, 11804 page_size, fi, 11805 cacheattrs != NULL ? &cacheattrs2 : NULL); 11806 fi->s2addr = ipa; 11807 /* Combine the S1 and S2 perms.
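* e.g. a stage 1 result of RWX combined with a stage 2 result of R-X leaves R-X: any permission missing from either stage is stripped from the final result.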
*/ 11808 *prot &= s2_prot; 11809 11810 /* Combine the S1 and S2 cache attributes, if needed */ 11811 if (!ret && cacheattrs != NULL) { 11812 if (env->cp15.hcr_el2 & HCR_DC) { 11813 /* 11814 * HCR.DC forces the first stage attributes to 11815 * Normal Non-Shareable, 11816 * Inner Write-Back Read-Allocate Write-Allocate, 11817 * Outer Write-Back Read-Allocate Write-Allocate. 11818 */ 11819 cacheattrs->attrs = 0xff; 11820 cacheattrs->shareability = 0; 11821 } 11822 *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2); 11823 } 11824 11825 return ret; 11826 } else { 11827 /* 11828 * For non-EL2 CPUs a stage1+stage2 translation is just stage 1. 11829 */ 11830 mmu_idx = stage_1_mmu_idx(mmu_idx); 11831 } 11832 } 11833 11834 /* The page table entries may downgrade secure to non-secure, but 11835 * cannot upgrade a non-secure translation regime's attributes 11836 * to secure. 11837 */ 11838 attrs->secure = regime_is_secure(env, mmu_idx); 11839 attrs->user = regime_is_user(env, mmu_idx); 11840 11841 /* Fast Context Switch Extension. This doesn't exist at all in v8. 11842 * In v7 and earlier it affects all stage 1 translations. 11843 */ 11844 if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2 11845 && !arm_feature(env, ARM_FEATURE_V8)) { 11846 if (regime_el(env, mmu_idx) == 3) { 11847 address += env->cp15.fcseidr_s; 11848 } else { 11849 address += env->cp15.fcseidr_ns; 11850 } 11851 } 11852 11853 if (arm_feature(env, ARM_FEATURE_PMSA)) { 11854 bool ret; 11855 *page_size = TARGET_PAGE_SIZE; 11856 11857 if (arm_feature(env, ARM_FEATURE_V8)) { 11858 /* PMSAv8 */ 11859 ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx, 11860 phys_ptr, attrs, prot, page_size, fi); 11861 } else if (arm_feature(env, ARM_FEATURE_V7)) { 11862 /* PMSAv7 */ 11863 ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx, 11864 phys_ptr, prot, page_size, fi); 11865 } else { 11866 /* Pre-v7 MPU */ 11867 ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx, 11868 phys_ptr, prot, fi); 11869 } 11870 qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32 11871 " mmu_idx %u -> %s (prot %c%c%c)\n", 11872 access_type == MMU_DATA_LOAD ? "reading" : 11873 (access_type == MMU_DATA_STORE ? "writing" : "execute"), 11874 (uint32_t)address, mmu_idx, 11875 ret ? "Miss" : "Hit", 11876 *prot & PAGE_READ ? 'r' : '-', 11877 *prot & PAGE_WRITE ? 'w' : '-', 11878 *prot & PAGE_EXEC ? 'x' : '-'); 11879 11880 return ret; 11881 } 11882 11883 /* Definitely a real MMU, not an MPU */ 11884 11885 if (regime_translation_disabled(env, mmu_idx)) { 11886 /* 11887 * MMU disabled. S1 addresses within aa64 translation regimes are 11888 * still checked for bounds -- see AArch64.TranslateAddressS1Off. 11889 */ 11890 if (mmu_idx != ARMMMUIdx_Stage2) { 11891 int r_el = regime_el(env, mmu_idx); 11892 if (arm_el_is_aa64(env, r_el)) { 11893 int pamax = arm_pamax(env_archcpu(env)); 11894 uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr; 11895 int addrtop, tbi; 11896 11897 tbi = aa64_va_parameter_tbi(tcr, mmu_idx); 11898 if (access_type == MMU_INST_FETCH) { 11899 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx); 11900 } 11901 tbi = (tbi >> extract64(address, 55, 1)) & 1; 11902 addrtop = (tbi ?
55 : 63); 11903 11904 if (extract64(address, pamax, addrtop - pamax + 1) != 0) { 11905 fi->type = ARMFault_AddressSize; 11906 fi->level = 0; 11907 fi->stage2 = false; 11908 return 1; 11909 } 11910 11911 /* 11912 * When TBI is disabled, we've just validated that all of the 11913 * bits above PAMax are zero, so logically we only need to 11914 * clear the top byte for TBI. But it's clearer to follow 11915 * the pseudocode set of addrdesc.paddress. 11916 */ 11917 address = extract64(address, 0, 52); 11918 } 11919 } 11920 *phys_ptr = address; 11921 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 11922 *page_size = TARGET_PAGE_SIZE; 11923 return 0; 11924 } 11925 11926 if (regime_using_lpae_format(env, mmu_idx)) { 11927 return get_phys_addr_lpae(env, address, access_type, mmu_idx, false, 11928 phys_ptr, attrs, prot, page_size, 11929 fi, cacheattrs); 11930 } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) { 11931 return get_phys_addr_v6(env, address, access_type, mmu_idx, 11932 phys_ptr, attrs, prot, page_size, fi); 11933 } else { 11934 return get_phys_addr_v5(env, address, access_type, mmu_idx, 11935 phys_ptr, prot, page_size, fi); 11936 } 11937 } 11938 11939 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, 11940 MemTxAttrs *attrs) 11941 { 11942 ARMCPU *cpu = ARM_CPU(cs); 11943 CPUARMState *env = &cpu->env; 11944 hwaddr phys_addr; 11945 target_ulong page_size; 11946 int prot; 11947 bool ret; 11948 ARMMMUFaultInfo fi = {}; 11949 ARMMMUIdx mmu_idx = arm_mmu_idx(env); 11950 11951 *attrs = (MemTxAttrs) {}; 11952 11953 ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr, 11954 attrs, &prot, &page_size, &fi, NULL); 11955 11956 if (ret) { 11957 return -1; 11958 } 11959 return phys_addr; 11960 } 11961 11962 #endif 11963 11964 /* Note that signed overflow is undefined in C. The following routines are 11965 careful to use unsigned types where modulo arithmetic is required. 11966 Failure to do so _will_ break on newer gcc. */ 11967 11968 /* Signed saturating arithmetic. */ 11969 11970 /* Perform 16-bit signed saturating addition. */ 11971 static inline uint16_t add16_sat(uint16_t a, uint16_t b) 11972 { 11973 uint16_t res; 11974 11975 res = a + b; 11976 if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) { 11977 if (a & 0x8000) 11978 res = 0x8000; 11979 else 11980 res = 0x7fff; 11981 } 11982 return res; 11983 } 11984 11985 /* Perform 8-bit signed saturating addition. */ 11986 static inline uint8_t add8_sat(uint8_t a, uint8_t b) 11987 { 11988 uint8_t res; 11989 11990 res = a + b; 11991 if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) { 11992 if (a & 0x80) 11993 res = 0x80; 11994 else 11995 res = 0x7f; 11996 } 11997 return res; 11998 } 11999 12000 /* Perform 16-bit signed saturating subtraction. */ 12001 static inline uint16_t sub16_sat(uint16_t a, uint16_t b) 12002 { 12003 uint16_t res; 12004 12005 res = a - b; 12006 if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) { 12007 if (a & 0x8000) 12008 res = 0x8000; 12009 else 12010 res = 0x7fff; 12011 } 12012 return res; 12013 } 12014 12015 /* Perform 8-bit signed saturating subtraction. 
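* e.g. sub8_sat(0x80, 0x01) stays saturated at 0x80 (-128) instead of wrapping around to 0x7f.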
*/ 12016 static inline uint8_t sub8_sat(uint8_t a, uint8_t b) 12017 { 12018 uint8_t res; 12019 12020 res = a - b; 12021 if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) { 12022 if (a & 0x80) 12023 res = 0x80; 12024 else 12025 res = 0x7f; 12026 } 12027 return res; 12028 } 12029 12030 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16); 12031 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16); 12032 #define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8); 12033 #define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8); 12034 #define PFX q 12035 12036 #include "op_addsub.h" 12037 12038 /* Unsigned saturating arithmetic. */ 12039 static inline uint16_t add16_usat(uint16_t a, uint16_t b) 12040 { 12041 uint16_t res; 12042 res = a + b; 12043 if (res < a) 12044 res = 0xffff; 12045 return res; 12046 } 12047 12048 static inline uint16_t sub16_usat(uint16_t a, uint16_t b) 12049 { 12050 if (a > b) 12051 return a - b; 12052 else 12053 return 0; 12054 } 12055 12056 static inline uint8_t add8_usat(uint8_t a, uint8_t b) 12057 { 12058 uint8_t res; 12059 res = a + b; 12060 if (res < a) 12061 res = 0xff; 12062 return res; 12063 } 12064 12065 static inline uint8_t sub8_usat(uint8_t a, uint8_t b) 12066 { 12067 if (a > b) 12068 return a - b; 12069 else 12070 return 0; 12071 } 12072 12073 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16); 12074 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16); 12075 #define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8); 12076 #define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8); 12077 #define PFX uq 12078 12079 #include "op_addsub.h" 12080 12081 /* Signed modulo arithmetic. */ 12082 #define SARITH16(a, b, n, op) do { \ 12083 int32_t sum; \ 12084 sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \ 12085 RESULT(sum, n, 16); \ 12086 if (sum >= 0) \ 12087 ge |= 3 << (n * 2); \ 12088 } while(0) 12089 12090 #define SARITH8(a, b, n, op) do { \ 12091 int32_t sum; \ 12092 sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \ 12093 RESULT(sum, n, 8); \ 12094 if (sum >= 0) \ 12095 ge |= 1 << n; \ 12096 } while(0) 12097 12098 12099 #define ADD16(a, b, n) SARITH16(a, b, n, +) 12100 #define SUB16(a, b, n) SARITH16(a, b, n, -) 12101 #define ADD8(a, b, n) SARITH8(a, b, n, +) 12102 #define SUB8(a, b, n) SARITH8(a, b, n, -) 12103 #define PFX s 12104 #define ARITH_GE 12105 12106 #include "op_addsub.h" 12107 12108 /* Unsigned modulo arithmetic. */ 12109 #define ADD16(a, b, n) do { \ 12110 uint32_t sum; \ 12111 sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \ 12112 RESULT(sum, n, 16); \ 12113 if ((sum >> 16) == 1) \ 12114 ge |= 3 << (n * 2); \ 12115 } while(0) 12116 12117 #define ADD8(a, b, n) do { \ 12118 uint32_t sum; \ 12119 sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \ 12120 RESULT(sum, n, 8); \ 12121 if ((sum >> 8) == 1) \ 12122 ge |= 1 << n; \ 12123 } while(0) 12124 12125 #define SUB16(a, b, n) do { \ 12126 uint32_t sum; \ 12127 sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \ 12128 RESULT(sum, n, 16); \ 12129 if ((sum >> 16) == 0) \ 12130 ge |= 3 << (n * 2); \ 12131 } while(0) 12132 12133 #define SUB8(a, b, n) do { \ 12134 uint32_t sum; \ 12135 sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \ 12136 RESULT(sum, n, 8); \ 12137 if ((sum >> 8) == 0) \ 12138 ge |= 1 << n; \ 12139 } while(0) 12140 12141 #define PFX u 12142 #define ARITH_GE 12143 12144 #include "op_addsub.h" 12145 12146 /* Halved signed arithmetic. 
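* Each lane is computed as (a op b) >> 1 in 32-bit arithmetic so the intermediate value cannot overflow; e.g. SHADD8 of 0x7f and 0x7f yields 0x7f.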
*/ 12147 #define ADD16(a, b, n) \ 12148 RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16) 12149 #define SUB16(a, b, n) \ 12150 RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16) 12151 #define ADD8(a, b, n) \ 12152 RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8) 12153 #define SUB8(a, b, n) \ 12154 RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8) 12155 #define PFX sh 12156 12157 #include "op_addsub.h" 12158 12159 /* Halved unsigned arithmetic. */ 12160 #define ADD16(a, b, n) \ 12161 RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16) 12162 #define SUB16(a, b, n) \ 12163 RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16) 12164 #define ADD8(a, b, n) \ 12165 RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8) 12166 #define SUB8(a, b, n) \ 12167 RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8) 12168 #define PFX uh 12169 12170 #include "op_addsub.h" 12171 12172 static inline uint8_t do_usad(uint8_t a, uint8_t b) 12173 { 12174 if (a > b) 12175 return a - b; 12176 else 12177 return b - a; 12178 } 12179 12180 /* Unsigned sum of absolute byte differences. */ 12181 uint32_t HELPER(usad8)(uint32_t a, uint32_t b) 12182 { 12183 uint32_t sum; 12184 sum = do_usad(a, b); 12185 sum += do_usad(a >> 8, b >> 8); 12186 sum += do_usad(a >> 16, b >>16); 12187 sum += do_usad(a >> 24, b >> 24); 12188 return sum; 12189 } 12190 12191 /* For ARMv6 SEL instruction. */ 12192 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b) 12193 { 12194 uint32_t mask; 12195 12196 mask = 0; 12197 if (flags & 1) 12198 mask |= 0xff; 12199 if (flags & 2) 12200 mask |= 0xff00; 12201 if (flags & 4) 12202 mask |= 0xff0000; 12203 if (flags & 8) 12204 mask |= 0xff000000; 12205 return (a & mask) | (b & ~mask); 12206 } 12207 12208 /* CRC helpers. 12209 * The upper bytes of val (above the number specified by 'bytes') must have 12210 * been zeroed out by the caller. 12211 */ 12212 uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes) 12213 { 12214 uint8_t buf[4]; 12215 12216 stl_le_p(buf, val); 12217 12218 /* zlib crc32 converts the accumulator and output to one's complement. */ 12219 return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff; 12220 } 12221 12222 uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes) 12223 { 12224 uint8_t buf[4]; 12225 12226 stl_le_p(buf, val); 12227 12228 /* Linux crc32c converts the output to one's complement. */ 12229 return crc32c(acc, buf, bytes) ^ 0xffffffff; 12230 } 12231 12232 /* Return the exception level to which FP-disabled exceptions should 12233 * be taken, or 0 if FP is enabled. 
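* For example, with CPACR_EL1.FPEN == 0 an FP instruction executed at EL0 or EL1 reports EL1 here, or EL3 if the access is Secure and EL3 is AArch32.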
12234 */ 12235 int fp_exception_el(CPUARMState *env, int cur_el) 12236 { 12237 #ifndef CONFIG_USER_ONLY 12238 /* CPACR and the CPTR registers don't exist before v6, so FP is 12239 * always accessible 12240 */ 12241 if (!arm_feature(env, ARM_FEATURE_V6)) { 12242 return 0; 12243 } 12244 12245 if (arm_feature(env, ARM_FEATURE_M)) { 12246 /* CPACR can cause a NOCP UsageFault taken to current security state */ 12247 if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) { 12248 return 1; 12249 } 12250 12251 if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) { 12252 if (!extract32(env->v7m.nsacr, 10, 1)) { 12253 /* FP insns cause a NOCP UsageFault taken to Secure */ 12254 return 3; 12255 } 12256 } 12257 12258 return 0; 12259 } 12260 12261 /* The CPACR controls traps to EL1, or PL1 if we're 32 bit: 12262 * 0, 2 : trap EL0 and EL1/PL1 accesses 12263 * 1 : trap only EL0 accesses 12264 * 3 : trap no accesses 12265 * This register is ignored if E2H+TGE are both set. 12266 */ 12267 if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { 12268 int fpen = extract32(env->cp15.cpacr_el1, 20, 2); 12269 12270 switch (fpen) { 12271 case 0: 12272 case 2: 12273 if (cur_el == 0 || cur_el == 1) { 12274 /* Trap to PL1, which might be EL1 or EL3 */ 12275 if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { 12276 return 3; 12277 } 12278 return 1; 12279 } 12280 if (cur_el == 3 && !is_a64(env)) { 12281 /* Secure PL1 running at EL3 */ 12282 return 3; 12283 } 12284 break; 12285 case 1: 12286 if (cur_el == 0) { 12287 return 1; 12288 } 12289 break; 12290 case 3: 12291 break; 12292 } 12293 } 12294 12295 /* 12296 * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode 12297 * to control non-secure access to the FPU. It doesn't have any 12298 * effect if EL3 is AArch64 or if EL3 doesn't exist at all. 12299 */ 12300 if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 12301 cur_el <= 2 && !arm_is_secure_below_el3(env))) { 12302 if (!extract32(env->cp15.nsacr, 10, 1)) { 12303 /* FP insns act as UNDEF */ 12304 return cur_el == 2 ? 2 : 1; 12305 } 12306 } 12307 12308 /* For the CPTR registers we don't need to guard with an ARM_FEATURE 12309 * check because zero bits in the registers mean "don't trap". 
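* (On CPUs without EL2 or EL3 the corresponding cptr_el[] fields simply stay zero, so the checks below fall through.)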
12310 */ 12311 12312 /* CPTR_EL2 : present in v7VE or v8 */ 12313 if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1) 12314 && !arm_is_secure_below_el3(env)) { 12315 /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */ 12316 return 2; 12317 } 12318 12319 /* CPTR_EL3 : present in v8 */ 12320 if (extract32(env->cp15.cptr_el[3], 10, 1)) { 12321 /* Trap all FP ops to EL3 */ 12322 return 3; 12323 } 12324 #endif 12325 return 0; 12326 } 12327 12328 /* Return the exception level we're running at if this is our mmu_idx */ 12329 int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx) 12330 { 12331 if (mmu_idx & ARM_MMU_IDX_M) { 12332 return mmu_idx & ARM_MMU_IDX_M_PRIV; 12333 } 12334 12335 switch (mmu_idx) { 12336 case ARMMMUIdx_E10_0: 12337 case ARMMMUIdx_E20_0: 12338 case ARMMMUIdx_SE10_0: 12339 return 0; 12340 case ARMMMUIdx_E10_1: 12341 case ARMMMUIdx_E10_1_PAN: 12342 case ARMMMUIdx_SE10_1: 12343 case ARMMMUIdx_SE10_1_PAN: 12344 return 1; 12345 case ARMMMUIdx_E2: 12346 case ARMMMUIdx_E20_2: 12347 case ARMMMUIdx_E20_2_PAN: 12348 return 2; 12349 case ARMMMUIdx_SE3: 12350 return 3; 12351 default: 12352 g_assert_not_reached(); 12353 } 12354 } 12355 12356 #ifndef CONFIG_TCG 12357 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate) 12358 { 12359 g_assert_not_reached(); 12360 } 12361 #endif 12362 12363 ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el) 12364 { 12365 if (arm_feature(env, ARM_FEATURE_M)) { 12366 return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure); 12367 } 12368 12369 /* See ARM pseudo-function ELIsInHost. */ 12370 switch (el) { 12371 case 0: 12372 if (arm_is_secure_below_el3(env)) { 12373 return ARMMMUIdx_SE10_0; 12374 } 12375 if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE) 12376 && arm_el_is_aa64(env, 2)) { 12377 return ARMMMUIdx_E20_0; 12378 } 12379 return ARMMMUIdx_E10_0; 12380 case 1: 12381 if (arm_is_secure_below_el3(env)) { 12382 if (env->pstate & PSTATE_PAN) { 12383 return ARMMMUIdx_SE10_1_PAN; 12384 } 12385 return ARMMMUIdx_SE10_1; 12386 } 12387 if (env->pstate & PSTATE_PAN) { 12388 return ARMMMUIdx_E10_1_PAN; 12389 } 12390 return ARMMMUIdx_E10_1; 12391 case 2: 12392 /* TODO: ARMv8.4-SecEL2 */ 12393 /* Note that TGE does not apply at EL2. 
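* Only HCR_EL2.E2H (with EL2 in AArch64) is checked below when choosing between the E20_2 and E2 regimes.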
*/ 12394 if ((env->cp15.hcr_el2 & HCR_E2H) && arm_el_is_aa64(env, 2)) { 12395 if (env->pstate & PSTATE_PAN) { 12396 return ARMMMUIdx_E20_2_PAN; 12397 } 12398 return ARMMMUIdx_E20_2; 12399 } 12400 return ARMMMUIdx_E2; 12401 case 3: 12402 return ARMMMUIdx_SE3; 12403 default: 12404 g_assert_not_reached(); 12405 } 12406 } 12407 12408 ARMMMUIdx arm_mmu_idx(CPUARMState *env) 12409 { 12410 return arm_mmu_idx_el(env, arm_current_el(env)); 12411 } 12412 12413 #ifndef CONFIG_USER_ONLY 12414 ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env) 12415 { 12416 return stage_1_mmu_idx(arm_mmu_idx(env)); 12417 } 12418 #endif 12419 12420 static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el, 12421 ARMMMUIdx mmu_idx, uint32_t flags) 12422 { 12423 flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el); 12424 flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX, 12425 arm_to_core_mmu_idx(mmu_idx)); 12426 12427 if (arm_singlestep_active(env)) { 12428 flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1); 12429 } 12430 return flags; 12431 } 12432 12433 static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el, 12434 ARMMMUIdx mmu_idx, uint32_t flags) 12435 { 12436 bool sctlr_b = arm_sctlr_b(env); 12437 12438 if (sctlr_b) { 12439 flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, 1); 12440 } 12441 if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) { 12442 flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1); 12443 } 12444 flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env)); 12445 12446 return rebuild_hflags_common(env, fp_el, mmu_idx, flags); 12447 } 12448 12449 static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el, 12450 ARMMMUIdx mmu_idx) 12451 { 12452 uint32_t flags = 0; 12453 12454 if (arm_v7m_is_handler_mode(env)) { 12455 flags = FIELD_DP32(flags, TBFLAG_M32, HANDLER, 1); 12456 } 12457 12458 /* 12459 * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN 12460 * is suppressing them because the requested execution priority 12461 * is less than 0. 12462 */ 12463 if (arm_feature(env, ARM_FEATURE_V8) && 12464 !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) && 12465 (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) { 12466 flags = FIELD_DP32(flags, TBFLAG_M32, STACKCHECK, 1); 12467 } 12468 12469 return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags); 12470 } 12471 12472 static uint32_t rebuild_hflags_aprofile(CPUARMState *env) 12473 { 12474 int flags = 0; 12475 12476 flags = FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL, 12477 arm_debug_target_el(env)); 12478 return flags; 12479 } 12480 12481 static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el, 12482 ARMMMUIdx mmu_idx) 12483 { 12484 uint32_t flags = rebuild_hflags_aprofile(env); 12485 12486 if (arm_el_is_aa64(env, 1)) { 12487 flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1); 12488 } 12489 12490 if (arm_current_el(env) < 2 && env->cp15.hstr_el2 && 12491 (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { 12492 flags = FIELD_DP32(flags, TBFLAG_A32, HSTR_ACTIVE, 1); 12493 } 12494 12495 return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags); 12496 } 12497 12498 static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, 12499 ARMMMUIdx mmu_idx) 12500 { 12501 uint32_t flags = rebuild_hflags_aprofile(env); 12502 ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx); 12503 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; 12504 uint64_t sctlr; 12505 int tbii, tbid; 12506 12507 flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1); 12508 12509 /* Get control bits for tagged addresses. 
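* TBI enables top-byte-ignore for the regime; TBID limits it to data accesses only, so the instruction-side control computed below is the TBI bits with the TBID bits masked off.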
*/ 12510 tbid = aa64_va_parameter_tbi(tcr, mmu_idx); 12511 tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx); 12512 12513 flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii); 12514 flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid); 12515 12516 if (cpu_isar_feature(aa64_sve, env_archcpu(env))) { 12517 int sve_el = sve_exception_el(env, el); 12518 uint32_t zcr_len; 12519 12520 /* 12521 * If SVE is disabled, but FP is enabled, 12522 * then the effective len is 0. 12523 */ 12524 if (sve_el != 0 && fp_el == 0) { 12525 zcr_len = 0; 12526 } else { 12527 zcr_len = sve_zcr_len_for_el(env, el); 12528 } 12529 flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el); 12530 flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len); 12531 } 12532 12533 sctlr = regime_sctlr(env, stage1); 12534 12535 if (arm_cpu_data_is_big_endian_a64(el, sctlr)) { 12536 flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1); 12537 } 12538 12539 if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) { 12540 /* 12541 * In order to save space in flags, we record only whether 12542 * pauth is "inactive", meaning all insns are implemented as 12543 * a nop, or "active" when some action must be performed. 12544 * The decision of which action to take is left to a helper. 12545 */ 12546 if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) { 12547 flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1); 12548 } 12549 } 12550 12551 if (cpu_isar_feature(aa64_bti, env_archcpu(env))) { 12552 /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */ 12553 if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) { 12554 flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1); 12555 } 12556 } 12557 12558 /* Compute the condition for using AccType_UNPRIV for LDTR et al. */ 12559 if (!(env->pstate & PSTATE_UAO)) { 12560 switch (mmu_idx) { 12561 case ARMMMUIdx_E10_1: 12562 case ARMMMUIdx_E10_1_PAN: 12563 case ARMMMUIdx_SE10_1: 12564 case ARMMMUIdx_SE10_1_PAN: 12565 /* TODO: ARMv8.3-NV */ 12566 flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1); 12567 break; 12568 case ARMMMUIdx_E20_2: 12569 case ARMMMUIdx_E20_2_PAN: 12570 /* TODO: ARMv8.4-SecEL2 */ 12571 /* 12572 * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is 12573 * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR. 12574 */ 12575 if (env->cp15.hcr_el2 & HCR_TGE) { 12576 flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1); 12577 } 12578 break; 12579 default: 12580 break; 12581 } 12582 } 12583 12584 return rebuild_hflags_common(env, fp_el, mmu_idx, flags); 12585 } 12586 12587 static uint32_t rebuild_hflags_internal(CPUARMState *env) 12588 { 12589 int el = arm_current_el(env); 12590 int fp_el = fp_exception_el(env, el); 12591 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); 12592 12593 if (is_a64(env)) { 12594 return rebuild_hflags_a64(env, el, fp_el, mmu_idx); 12595 } else if (arm_feature(env, ARM_FEATURE_M)) { 12596 return rebuild_hflags_m32(env, fp_el, mmu_idx); 12597 } else { 12598 return rebuild_hflags_a32(env, fp_el, mmu_idx); 12599 } 12600 } 12601 12602 void arm_rebuild_hflags(CPUARMState *env) 12603 { 12604 env->hflags = rebuild_hflags_internal(env); 12605 } 12606 12607 /* 12608 * If we have triggered a EL state change we can't rely on the 12609 * translator having passed it to us, we need to recompute. 
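* (e.g. on exception entry or return, where the EL we end up in is not known at translate time).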
12610 */ 12611 void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env) 12612 { 12613 int el = arm_current_el(env); 12614 int fp_el = fp_exception_el(env, el); 12615 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); 12616 env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx); 12617 } 12618 12619 void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el) 12620 { 12621 int fp_el = fp_exception_el(env, el); 12622 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); 12623 12624 env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx); 12625 } 12626 12627 /* 12628 * If we have triggered a EL state change we can't rely on the 12629 * translator having passed it to us, we need to recompute. 12630 */ 12631 void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env) 12632 { 12633 int el = arm_current_el(env); 12634 int fp_el = fp_exception_el(env, el); 12635 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); 12636 env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx); 12637 } 12638 12639 void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el) 12640 { 12641 int fp_el = fp_exception_el(env, el); 12642 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); 12643 12644 env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx); 12645 } 12646 12647 void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el) 12648 { 12649 int fp_el = fp_exception_el(env, el); 12650 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); 12651 12652 env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx); 12653 } 12654 12655 static inline void assert_hflags_rebuild_correctly(CPUARMState *env) 12656 { 12657 #ifdef CONFIG_DEBUG_TCG 12658 uint32_t env_flags_current = env->hflags; 12659 uint32_t env_flags_rebuilt = rebuild_hflags_internal(env); 12660 12661 if (unlikely(env_flags_current != env_flags_rebuilt)) { 12662 fprintf(stderr, "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n", 12663 env_flags_current, env_flags_rebuilt); 12664 abort(); 12665 } 12666 #endif 12667 } 12668 12669 void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, 12670 target_ulong *cs_base, uint32_t *pflags) 12671 { 12672 uint32_t flags = env->hflags; 12673 uint32_t pstate_for_ss; 12674 12675 *cs_base = 0; 12676 assert_hflags_rebuild_correctly(env); 12677 12678 if (FIELD_EX32(flags, TBFLAG_ANY, AARCH64_STATE)) { 12679 *pc = env->pc; 12680 if (cpu_isar_feature(aa64_bti, env_archcpu(env))) { 12681 flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype); 12682 } 12683 pstate_for_ss = env->pstate; 12684 } else { 12685 *pc = env->regs[15]; 12686 12687 if (arm_feature(env, ARM_FEATURE_M)) { 12688 if (arm_feature(env, ARM_FEATURE_M_SECURITY) && 12689 FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S) 12690 != env->v7m.secure) { 12691 flags = FIELD_DP32(flags, TBFLAG_M32, FPCCR_S_WRONG, 1); 12692 } 12693 12694 if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) && 12695 (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) || 12696 (env->v7m.secure && 12697 !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) { 12698 /* 12699 * ASPEN is set, but FPCA/SFPA indicate that there is no 12700 * active FP context; we must create a new FP context before 12701 * executing any FP insn. 12702 */ 12703 flags = FIELD_DP32(flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED, 1); 12704 } 12705 12706 bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK; 12707 if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) { 12708 flags = FIELD_DP32(flags, TBFLAG_M32, LSPACT, 1); 12709 } 12710 } else { 12711 /* 12712 * Note that XSCALE_CPAR shares bits with VECSTRIDE. 
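* (which is why the code below writes only one of the two, depending on ARM_FEATURE_XSCALE).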
12713 * Note that VECLEN+VECSTRIDE are RES0 for M-profile. 12714 */ 12715 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 12716 flags = FIELD_DP32(flags, TBFLAG_A32, 12717 XSCALE_CPAR, env->cp15.c15_cpar); 12718 } else { 12719 flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN, 12720 env->vfp.vec_len); 12721 flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE, 12722 env->vfp.vec_stride); 12723 } 12724 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) { 12725 flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1); 12726 } 12727 } 12728 12729 flags = FIELD_DP32(flags, TBFLAG_AM32, THUMB, env->thumb); 12730 flags = FIELD_DP32(flags, TBFLAG_AM32, CONDEXEC, env->condexec_bits); 12731 pstate_for_ss = env->uncached_cpsr; 12732 } 12733 12734 /* 12735 * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine 12736 * states defined in the ARM ARM for software singlestep: 12737 * SS_ACTIVE PSTATE.SS State 12738 * 0 x Inactive (the TB flag for SS is always 0) 12739 * 1 0 Active-pending 12740 * 1 1 Active-not-pending 12741 * SS_ACTIVE is set in hflags; PSTATE_SS is computed every TB. 12742 */ 12743 if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE) && 12744 (pstate_for_ss & PSTATE_SS)) { 12745 flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1); 12746 } 12747 12748 *pflags = flags; 12749 } 12750 12751 #ifdef TARGET_AARCH64 12752 /* 12753 * The manual says that when SVE is enabled and VQ is widened the 12754 * implementation is allowed to zero the previously inaccessible 12755 * portion of the registers. The corollary to that is that when 12756 * SVE is enabled and VQ is narrowed we are also allowed to zero 12757 * the now inaccessible portion of the registers. 12758 * 12759 * The intent of this is that no predicate bit beyond VQ is ever set. 12760 * Which means that some operations on predicate registers themselves 12761 * may operate on full uint64_t or even unrolled across the maximum 12762 * uint64_t[4]. Performing 4 bits of host arithmetic unconditionally 12763 * may well be cheaper than conditionals to restrict the operation 12764 * to the relevant portion of a uint16_t[16]. 12765 */ 12766 void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) 12767 { 12768 int i, j; 12769 uint64_t pmask; 12770 12771 assert(vq >= 1 && vq <= ARM_MAX_VQ); 12772 assert(vq <= env_archcpu(env)->sve_max_vq); 12773 12774 /* Zap the high bits of the zregs. */ 12775 for (i = 0; i < 32; i++) { 12776 memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq)); 12777 } 12778 12779 /* Zap the high bits of the pregs and ffr. */ 12780 pmask = 0; 12781 if (vq & 3) { 12782 pmask = ~(-1ULL << (16 * (vq & 3))); 12783 } 12784 for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) { 12785 for (i = 0; i < 17; ++i) { 12786 env->vfp.pregs[i].p[j] &= pmask; 12787 } 12788 pmask = 0; 12789 } 12790 } 12791 12792 /* 12793 * Notice a change in SVE vector size when changing EL. 12794 */ 12795 void aarch64_sve_change_el(CPUARMState *env, int old_el, 12796 int new_el, bool el0_a64) 12797 { 12798 ARMCPU *cpu = env_archcpu(env); 12799 int old_len, new_len; 12800 bool old_a64, new_a64; 12801 12802 /* Nothing to do if no SVE. */ 12803 if (!cpu_isar_feature(aa64_sve, cpu)) { 12804 return; 12805 } 12806 12807 /* Nothing to do if FP is disabled in either EL. 
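* (fp_exception_el() returns non-zero when FP, and therefore SVE, accesses trap at the given EL.)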
*/ 12808 if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) { 12809 return; 12810 } 12811 12812 /* 12813 * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped 12814 * at ELx, or not available because the EL is in AArch32 state, then 12815 * for all purposes other than a direct read, the ZCR_ELx.LEN field 12816 * has an effective value of 0". 12817 * 12818 * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0). 12819 * If we ignore aa32 state, we would fail to see the vq4->vq0 transition 12820 * from EL2->EL1. Thus we go ahead and narrow when entering aa32 so that 12821 * we already have the correct register contents when encountering the 12822 * vq0->vq0 transition between EL0->EL1. 12823 */ 12824 old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64; 12825 old_len = (old_a64 && !sve_exception_el(env, old_el) 12826 ? sve_zcr_len_for_el(env, old_el) : 0); 12827 new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64; 12828 new_len = (new_a64 && !sve_exception_el(env, new_el) 12829 ? sve_zcr_len_for_el(env, new_el) : 0); 12830 12831 /* When changing vector length, clear inaccessible state. */ 12832 if (new_len < old_len) { 12833 aarch64_sve_narrow_vq(env, new_len + 1); 12834 } 12835 } 12836 #endif 12837