/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "target/arm/idau.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "hw/irq.h"
#include "semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
#include "arm_ldst.h"
#include "exec/cpu_ldst.h"
#include "semihosting/common-semi.h"
#endif

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
#define PMCR_NUM_COUNTERS 4 /* QEMU IMPDEF choice */

#ifndef CONFIG_USER_ONLY

static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               bool s1_is_el0,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
    __attribute__((nonnull));
#endif

static void switch_mode(CPUARMState *env, int mode);
static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);

static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;

    /* VFP data registers are always little-endian. */
    if (reg < nregs) {
        return gdb_get_reg64(buf, *aa32_vfp_dreg(env, reg));
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs. */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            return gdb_get_reg128(buf, q[0], q[1]);
        }
    }
    switch (reg - nregs) {
    case 0: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPSID]); break;
    case 1: return gdb_get_reg32(buf, vfp_get_fpscr(env)); break;
    case 2: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPEXC]); break;
    }
    return 0;
}

static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;

    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}
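
/*
 * Illustrative note (not from the original source): with the gdb numbering
 * used above, a CPU with 32 D registers and Neon exposes the Q-register
 * aliases as regs 32..47, so the first "extra" register is reg 48 (FPSID),
 * followed by 49 (FPSCR) and 50 (FPEXC).
 */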

static int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register - quads are in LE order */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        return gdb_get_reg128(buf, q[1], q[0]);
    }
    case 32:
        /* FPSR */
        return gdb_get_reg32(buf, vfp_get_fpsr(env));
    case 33:
        /* FPCR */
        return gdb_get_reg32(buf, vfp_get_fpcr(env));
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        {
            uint64_t *q = aa64_vfp_qreg(env, reg);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}

static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

/**
 * arm_get/set_gdb_*: get/set a gdb register
 * @env: the CPU state
 * @buf: a buffer to copy to/from
 * @reg: register number (offset from start of group)
 *
 * We return the number of bytes copied
 */

static int arm_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri;
    uint32_t key;

    key = cpu->dyn_sysreg_xml.data.cpregs.keys[reg];
    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    if (ri) {
        if (cpreg_field_is_64bit(ri)) {
            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
        } else {
            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
        }
    }
    return 0;
}

static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    return 0;
}

#ifdef TARGET_AARCH64
static int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);

    switch (reg) {
    /* The first 32 registers are the zregs */
    case 0 ... 31:
    {
        int vq, len = 0;
        for (vq = 0; vq < cpu->sve_max_vq; vq++) {
            len += gdb_get_reg128(buf,
                                  env->vfp.zregs[reg].d[vq * 2 + 1],
                                  env->vfp.zregs[reg].d[vq * 2]);
        }
        return len;
    }
    case 32:
        return gdb_get_reg32(buf, vfp_get_fpsr(env));
    case 33:
        return gdb_get_reg32(buf, vfp_get_fpcr(env));
    /* then 16 predicates and the ffr */
    case 34 ... 50:
    {
        int preg = reg - 34;
        int vq, len = 0;
        for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
            len += gdb_get_reg64(buf, env->vfp.pregs[preg].p[vq / 4]);
        }
        return len;
    }
    case 51:
    {
        /*
         * We report in Vector Granules (VG) which is 64bit in a Z reg
         * while the ZCR works in Vector Quads (VQ) which is 128bit chunks.
         */
        int vq = sve_zcr_len_for_el(env, arm_current_el(env)) + 1;
        return gdb_get_reg64(buf, vq * 2);
    }
    default:
        /* gdbstub asked for something out of our range */
        qemu_log_mask(LOG_UNIMP, "%s: out of range register %d", __func__, reg);
        break;
    }

    return 0;
}

static int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);

    /* The first 32 registers are the zregs */
    switch (reg) {
    /* The first 32 registers are the zregs */
    case 0 ... 31:
    {
        int vq, len = 0;
        uint64_t *p = (uint64_t *) buf;
        for (vq = 0; vq < cpu->sve_max_vq; vq++) {
            env->vfp.zregs[reg].d[vq * 2 + 1] = *p++;
            env->vfp.zregs[reg].d[vq * 2] = *p++;
            len += 16;
        }
        return len;
    }
    case 32:
        vfp_set_fpsr(env, *(uint32_t *)buf);
        return 4;
    case 33:
        vfp_set_fpcr(env, *(uint32_t *)buf);
        return 4;
    case 34 ... 50:
    {
        int preg = reg - 34;
        int vq, len = 0;
        uint64_t *p = (uint64_t *) buf;
        for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
            env->vfp.pregs[preg].p[vq / 4] = *p++;
            len += 8;
        }
        return len;
    }
    case 51:
        /* cannot set vg via gdbstub */
        return 0;
    default:
        /* gdbstub asked for something out of our range */
        break;
    }

    return 0;
}
#endif /* TARGET_AARCH64 */

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}
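
/*
 * Illustrative note (assumption, not from the original source): the two
 * helpers below form the sync pair used around migration and KVM register
 * sync: write_list_to_cpustate() pushes the (index,value) list into
 * cpu->env, and write_cpustate_to_list() reads it back out, skipping
 * ARM_CP_NO_RAW registers in both directions.
 */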

bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}
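
/*
 * Note added for clarity (not in the original source): init_cpreg_list()
 * below makes two passes over the sorted key list -- count_cpreg() to size
 * the arrays, then add_cpreg_to_list() to fill them -- and asserts that
 * both passes agree on the length.
 */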
484 */ 485 GList *keys; 486 int arraylen; 487 488 keys = g_hash_table_get_keys(cpu->cp_regs); 489 keys = g_list_sort(keys, cpreg_key_compare); 490 491 cpu->cpreg_array_len = 0; 492 493 g_list_foreach(keys, count_cpreg, cpu); 494 495 arraylen = cpu->cpreg_array_len; 496 cpu->cpreg_indexes = g_new(uint64_t, arraylen); 497 cpu->cpreg_values = g_new(uint64_t, arraylen); 498 cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen); 499 cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen); 500 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len; 501 cpu->cpreg_array_len = 0; 502 503 g_list_foreach(keys, add_cpreg_to_list, cpu); 504 505 assert(cpu->cpreg_array_len == arraylen); 506 507 g_list_free(keys); 508 } 509 510 /* 511 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0. 512 */ 513 static CPAccessResult access_el3_aa32ns(CPUARMState *env, 514 const ARMCPRegInfo *ri, 515 bool isread) 516 { 517 if (!is_a64(env) && arm_current_el(env) == 3 && 518 arm_is_secure_below_el3(env)) { 519 return CP_ACCESS_TRAP_UNCATEGORIZED; 520 } 521 return CP_ACCESS_OK; 522 } 523 524 /* Some secure-only AArch32 registers trap to EL3 if used from 525 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts). 526 * Note that an access from Secure EL1 can only happen if EL3 is AArch64. 527 * We assume that the .access field is set to PL1_RW. 528 */ 529 static CPAccessResult access_trap_aa32s_el1(CPUARMState *env, 530 const ARMCPRegInfo *ri, 531 bool isread) 532 { 533 if (arm_current_el(env) == 3) { 534 return CP_ACCESS_OK; 535 } 536 if (arm_is_secure_below_el3(env)) { 537 if (env->cp15.scr_el3 & SCR_EEL2) { 538 return CP_ACCESS_TRAP_EL2; 539 } 540 return CP_ACCESS_TRAP_EL3; 541 } 542 /* This will be EL1 NS and EL2 NS, which just UNDEF */ 543 return CP_ACCESS_TRAP_UNCATEGORIZED; 544 } 545 546 static uint64_t arm_mdcr_el2_eff(CPUARMState *env) 547 { 548 return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0; 549 } 550 551 /* Check for traps to "powerdown debug" registers, which are controlled 552 * by MDCR.TDOSA 553 */ 554 static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri, 555 bool isread) 556 { 557 int el = arm_current_el(env); 558 uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); 559 bool mdcr_el2_tdosa = (mdcr_el2 & MDCR_TDOSA) || (mdcr_el2 & MDCR_TDE) || 560 (arm_hcr_el2_eff(env) & HCR_TGE); 561 562 if (el < 2 && mdcr_el2_tdosa) { 563 return CP_ACCESS_TRAP_EL2; 564 } 565 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) { 566 return CP_ACCESS_TRAP_EL3; 567 } 568 return CP_ACCESS_OK; 569 } 570 571 /* Check for traps to "debug ROM" registers, which are controlled 572 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3. 573 */ 574 static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri, 575 bool isread) 576 { 577 int el = arm_current_el(env); 578 uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); 579 bool mdcr_el2_tdra = (mdcr_el2 & MDCR_TDRA) || (mdcr_el2 & MDCR_TDE) || 580 (arm_hcr_el2_eff(env) & HCR_TGE); 581 582 if (el < 2 && mdcr_el2_tdra) { 583 return CP_ACCESS_TRAP_EL2; 584 } 585 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { 586 return CP_ACCESS_TRAP_EL3; 587 } 588 return CP_ACCESS_OK; 589 } 590 591 /* Check for traps to general debug registers, which are controlled 592 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3. 
593 */ 594 static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri, 595 bool isread) 596 { 597 int el = arm_current_el(env); 598 uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); 599 bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) || 600 (arm_hcr_el2_eff(env) & HCR_TGE); 601 602 if (el < 2 && mdcr_el2_tda) { 603 return CP_ACCESS_TRAP_EL2; 604 } 605 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { 606 return CP_ACCESS_TRAP_EL3; 607 } 608 return CP_ACCESS_OK; 609 } 610 611 /* Check for traps to performance monitor registers, which are controlled 612 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3. 613 */ 614 static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri, 615 bool isread) 616 { 617 int el = arm_current_el(env); 618 uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); 619 620 if (el < 2 && (mdcr_el2 & MDCR_TPM)) { 621 return CP_ACCESS_TRAP_EL2; 622 } 623 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { 624 return CP_ACCESS_TRAP_EL3; 625 } 626 return CP_ACCESS_OK; 627 } 628 629 /* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */ 630 static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri, 631 bool isread) 632 { 633 if (arm_current_el(env) == 1) { 634 uint64_t trap = isread ? HCR_TRVM : HCR_TVM; 635 if (arm_hcr_el2_eff(env) & trap) { 636 return CP_ACCESS_TRAP_EL2; 637 } 638 } 639 return CP_ACCESS_OK; 640 } 641 642 /* Check for traps from EL1 due to HCR_EL2.TSW. */ 643 static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri, 644 bool isread) 645 { 646 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) { 647 return CP_ACCESS_TRAP_EL2; 648 } 649 return CP_ACCESS_OK; 650 } 651 652 /* Check for traps from EL1 due to HCR_EL2.TACR. */ 653 static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri, 654 bool isread) 655 { 656 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) { 657 return CP_ACCESS_TRAP_EL2; 658 } 659 return CP_ACCESS_OK; 660 } 661 662 /* Check for traps from EL1 due to HCR_EL2.TTLB. */ 663 static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri, 664 bool isread) 665 { 666 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) { 667 return CP_ACCESS_TRAP_EL2; 668 } 669 return CP_ACCESS_OK; 670 } 671 672 static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 673 { 674 ARMCPU *cpu = env_archcpu(env); 675 676 raw_write(env, ri, value); 677 tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */ 678 } 679 680 static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 681 { 682 ARMCPU *cpu = env_archcpu(env); 683 684 if (raw_read(env, ri) != value) { 685 /* Unlike real hardware the qemu TLB uses virtual addresses, 686 * not modified virtual addresses, so this causes a TLB flush. 687 */ 688 tlb_flush(CPU(cpu)); 689 raw_write(env, ri, value); 690 } 691 } 692 693 static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri, 694 uint64_t value) 695 { 696 ARMCPU *cpu = env_archcpu(env); 697 698 if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA) 699 && !extended_addresses_enabled(env)) { 700 /* For VMSA (when not using the LPAE long descriptor page table 701 * format) this register includes the ASID, so do a TLB flush. 702 * For PMSA it is purely a process ID and no action is needed. 
703 */ 704 tlb_flush(CPU(cpu)); 705 } 706 raw_write(env, ri, value); 707 } 708 709 /* IS variants of TLB operations must affect all cores */ 710 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 711 uint64_t value) 712 { 713 CPUState *cs = env_cpu(env); 714 715 tlb_flush_all_cpus_synced(cs); 716 } 717 718 static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 719 uint64_t value) 720 { 721 CPUState *cs = env_cpu(env); 722 723 tlb_flush_all_cpus_synced(cs); 724 } 725 726 static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 727 uint64_t value) 728 { 729 CPUState *cs = env_cpu(env); 730 731 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); 732 } 733 734 static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 735 uint64_t value) 736 { 737 CPUState *cs = env_cpu(env); 738 739 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); 740 } 741 742 /* 743 * Non-IS variants of TLB operations are upgraded to 744 * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to 745 * force broadcast of these operations. 746 */ 747 static bool tlb_force_broadcast(CPUARMState *env) 748 { 749 return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB); 750 } 751 752 static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri, 753 uint64_t value) 754 { 755 /* Invalidate all (TLBIALL) */ 756 CPUState *cs = env_cpu(env); 757 758 if (tlb_force_broadcast(env)) { 759 tlb_flush_all_cpus_synced(cs); 760 } else { 761 tlb_flush(cs); 762 } 763 } 764 765 static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri, 766 uint64_t value) 767 { 768 /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */ 769 CPUState *cs = env_cpu(env); 770 771 value &= TARGET_PAGE_MASK; 772 if (tlb_force_broadcast(env)) { 773 tlb_flush_page_all_cpus_synced(cs, value); 774 } else { 775 tlb_flush_page(cs, value); 776 } 777 } 778 779 static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri, 780 uint64_t value) 781 { 782 /* Invalidate by ASID (TLBIASID) */ 783 CPUState *cs = env_cpu(env); 784 785 if (tlb_force_broadcast(env)) { 786 tlb_flush_all_cpus_synced(cs); 787 } else { 788 tlb_flush(cs); 789 } 790 } 791 792 static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri, 793 uint64_t value) 794 { 795 /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */ 796 CPUState *cs = env_cpu(env); 797 798 value &= TARGET_PAGE_MASK; 799 if (tlb_force_broadcast(env)) { 800 tlb_flush_page_all_cpus_synced(cs, value); 801 } else { 802 tlb_flush_page(cs, value); 803 } 804 } 805 806 static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri, 807 uint64_t value) 808 { 809 CPUState *cs = env_cpu(env); 810 811 tlb_flush_by_mmuidx(cs, 812 ARMMMUIdxBit_E10_1 | 813 ARMMMUIdxBit_E10_1_PAN | 814 ARMMMUIdxBit_E10_0); 815 } 816 817 static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 818 uint64_t value) 819 { 820 CPUState *cs = env_cpu(env); 821 822 tlb_flush_by_mmuidx_all_cpus_synced(cs, 823 ARMMMUIdxBit_E10_1 | 824 ARMMMUIdxBit_E10_1_PAN | 825 ARMMMUIdxBit_E10_0); 826 } 827 828 829 static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, 830 uint64_t value) 831 { 832 CPUState *cs = env_cpu(env); 833 834 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2); 835 } 836 837 static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 838 uint64_t value) 839 { 840 CPUState *cs = env_cpu(env); 841 842 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2); 843 } 844 845 static 

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_E2);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
914 */ 915 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0, 916 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 917 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1, 918 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 919 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4, 920 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 921 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8, 922 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 923 /* Cache maintenance ops; some of this space may be overridden later. */ 924 { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, 925 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, 926 .type = ARM_CP_NOP | ARM_CP_OVERRIDE }, 927 REGINFO_SENTINEL 928 }; 929 930 static const ARMCPRegInfo not_v6_cp_reginfo[] = { 931 /* Not all pre-v6 cores implemented this WFI, so this is slightly 932 * over-broad. 933 */ 934 { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2, 935 .access = PL1_W, .type = ARM_CP_WFI }, 936 REGINFO_SENTINEL 937 }; 938 939 static const ARMCPRegInfo not_v7_cp_reginfo[] = { 940 /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which 941 * is UNPREDICTABLE; we choose to NOP as most implementations do). 942 */ 943 { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, 944 .access = PL1_W, .type = ARM_CP_WFI }, 945 /* L1 cache lockdown. Not architectural in v6 and earlier but in practice 946 * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and 947 * OMAPCP will override this space. 948 */ 949 { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0, 950 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data), 951 .resetvalue = 0 }, 952 { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1, 953 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn), 954 .resetvalue = 0 }, 955 /* v6 doesn't have the cache ID registers but Linux reads them anyway */ 956 { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY, 957 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 958 .resetvalue = 0 }, 959 /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR; 960 * implementing it as RAZ means the "debug architecture version" bits 961 * will read as a reserved value, which should cause Linux to not try 962 * to use the debug hardware. 963 */ 964 { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, 965 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 966 /* MMU TLB control. Note that the wildcarding means we cover not just 967 * the unified TLB ops but also the dside/iside/inner-shareable variants. 
968 */ 969 { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY, 970 .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write, 971 .type = ARM_CP_NO_RAW }, 972 { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY, 973 .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write, 974 .type = ARM_CP_NO_RAW }, 975 { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY, 976 .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write, 977 .type = ARM_CP_NO_RAW }, 978 { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY, 979 .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write, 980 .type = ARM_CP_NO_RAW }, 981 { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2, 982 .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP }, 983 { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2, 984 .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP }, 985 REGINFO_SENTINEL 986 }; 987 988 static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, 989 uint64_t value) 990 { 991 uint32_t mask = 0; 992 993 /* In ARMv8 most bits of CPACR_EL1 are RES0. */ 994 if (!arm_feature(env, ARM_FEATURE_V8)) { 995 /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI. 996 * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP. 997 * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell. 998 */ 999 if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) { 1000 /* VFP coprocessor: cp10 & cp11 [23:20] */ 1001 mask |= (1 << 31) | (1 << 30) | (0xf << 20); 1002 1003 if (!arm_feature(env, ARM_FEATURE_NEON)) { 1004 /* ASEDIS [31] bit is RAO/WI */ 1005 value |= (1 << 31); 1006 } 1007 1008 /* VFPv3 and upwards with NEON implement 32 double precision 1009 * registers (D0-D31). 1010 */ 1011 if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) { 1012 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */ 1013 value |= (1 << 30); 1014 } 1015 } 1016 value &= mask; 1017 } 1018 1019 /* 1020 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10 1021 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00. 1022 */ 1023 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 1024 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { 1025 value &= ~(0xf << 20); 1026 value |= env->cp15.cpacr_el1 & (0xf << 20); 1027 } 1028 1029 env->cp15.cpacr_el1 = value; 1030 } 1031 1032 static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1033 { 1034 /* 1035 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10 1036 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00. 1037 */ 1038 uint64_t value = env->cp15.cpacr_el1; 1039 1040 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 1041 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { 1042 value &= ~(0xf << 20); 1043 } 1044 return value; 1045 } 1046 1047 1048 static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri) 1049 { 1050 /* Call cpacr_write() so that we reset with the correct RAO bits set 1051 * for our CPU features. 
1052 */ 1053 cpacr_write(env, ri, 0); 1054 } 1055 1056 static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri, 1057 bool isread) 1058 { 1059 if (arm_feature(env, ARM_FEATURE_V8)) { 1060 /* Check if CPACR accesses are to be trapped to EL2 */ 1061 if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) && 1062 (env->cp15.cptr_el[2] & CPTR_TCPAC)) { 1063 return CP_ACCESS_TRAP_EL2; 1064 /* Check if CPACR accesses are to be trapped to EL3 */ 1065 } else if (arm_current_el(env) < 3 && 1066 (env->cp15.cptr_el[3] & CPTR_TCPAC)) { 1067 return CP_ACCESS_TRAP_EL3; 1068 } 1069 } 1070 1071 return CP_ACCESS_OK; 1072 } 1073 1074 static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri, 1075 bool isread) 1076 { 1077 /* Check if CPTR accesses are set to trap to EL3 */ 1078 if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) { 1079 return CP_ACCESS_TRAP_EL3; 1080 } 1081 1082 return CP_ACCESS_OK; 1083 } 1084 1085 static const ARMCPRegInfo v6_cp_reginfo[] = { 1086 /* prefetch by MVA in v6, NOP in v7 */ 1087 { .name = "MVA_prefetch", 1088 .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1, 1089 .access = PL1_W, .type = ARM_CP_NOP }, 1090 /* We need to break the TB after ISB to execute self-modifying code 1091 * correctly and also to take any pending interrupts immediately. 1092 * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag. 1093 */ 1094 { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4, 1095 .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore }, 1096 { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4, 1097 .access = PL0_W, .type = ARM_CP_NOP }, 1098 { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5, 1099 .access = PL0_W, .type = ARM_CP_NOP }, 1100 { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2, 1101 .access = PL1_RW, .accessfn = access_tvm_trvm, 1102 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s), 1103 offsetof(CPUARMState, cp15.ifar_ns) }, 1104 .resetvalue = 0, }, 1105 /* Watchpoint Fault Address Register : should actually only be present 1106 * for 1136, 1176, 11MPCore. 1107 */ 1108 { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1, 1109 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, }, 1110 { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, 1111 .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access, 1112 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1), 1113 .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read }, 1114 REGINFO_SENTINEL 1115 }; 1116 1117 /* Definitions for the PMU registers */ 1118 #define PMCRN_MASK 0xf800 1119 #define PMCRN_SHIFT 11 1120 #define PMCRLC 0x40 1121 #define PMCRDP 0x20 1122 #define PMCRX 0x10 1123 #define PMCRD 0x8 1124 #define PMCRC 0x4 1125 #define PMCRP 0x2 1126 #define PMCRE 0x1 1127 /* 1128 * Mask of PMCR bits writeable by guest (not including WO bits like C, P, 1129 * which can be written as 1 to trigger behaviour but which stay RAZ). 
1130 */ 1131 #define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE) 1132 1133 #define PMXEVTYPER_P 0x80000000 1134 #define PMXEVTYPER_U 0x40000000 1135 #define PMXEVTYPER_NSK 0x20000000 1136 #define PMXEVTYPER_NSU 0x10000000 1137 #define PMXEVTYPER_NSH 0x08000000 1138 #define PMXEVTYPER_M 0x04000000 1139 #define PMXEVTYPER_MT 0x02000000 1140 #define PMXEVTYPER_EVTCOUNT 0x0000ffff 1141 #define PMXEVTYPER_MASK (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \ 1142 PMXEVTYPER_NSU | PMXEVTYPER_NSH | \ 1143 PMXEVTYPER_M | PMXEVTYPER_MT | \ 1144 PMXEVTYPER_EVTCOUNT) 1145 1146 #define PMCCFILTR 0xf8000000 1147 #define PMCCFILTR_M PMXEVTYPER_M 1148 #define PMCCFILTR_EL0 (PMCCFILTR | PMCCFILTR_M) 1149 1150 static inline uint32_t pmu_num_counters(CPUARMState *env) 1151 { 1152 return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT; 1153 } 1154 1155 /* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */ 1156 static inline uint64_t pmu_counter_mask(CPUARMState *env) 1157 { 1158 return (1 << 31) | ((1 << pmu_num_counters(env)) - 1); 1159 } 1160 1161 typedef struct pm_event { 1162 uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */ 1163 /* If the event is supported on this CPU (used to generate PMCEID[01]) */ 1164 bool (*supported)(CPUARMState *); 1165 /* 1166 * Retrieve the current count of the underlying event. The programmed 1167 * counters hold a difference from the return value from this function 1168 */ 1169 uint64_t (*get_count)(CPUARMState *); 1170 /* 1171 * Return how many nanoseconds it will take (at a minimum) for count events 1172 * to occur. A negative value indicates the counter will never overflow, or 1173 * that the counter has otherwise arranged for the overflow bit to be set 1174 * and the PMU interrupt to be raised on overflow. 1175 */ 1176 int64_t (*ns_per_count)(uint64_t); 1177 } pm_event; 1178 1179 static bool event_always_supported(CPUARMState *env) 1180 { 1181 return true; 1182 } 1183 1184 static uint64_t swinc_get_count(CPUARMState *env) 1185 { 1186 /* 1187 * SW_INCR events are written directly to the pmevcntr's by writes to 1188 * PMSWINC, so there is no underlying count maintained by the PMU itself 1189 */ 1190 return 0; 1191 } 1192 1193 static int64_t swinc_ns_per(uint64_t ignored) 1194 { 1195 return -1; 1196 } 1197 1198 /* 1199 * Return the underlying cycle count for the PMU cycle counters. If we're in 1200 * usermode, simply return 0. 
1201 */ 1202 static uint64_t cycles_get_count(CPUARMState *env) 1203 { 1204 #ifndef CONFIG_USER_ONLY 1205 return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 1206 ARM_CPU_FREQ, NANOSECONDS_PER_SECOND); 1207 #else 1208 return cpu_get_host_ticks(); 1209 #endif 1210 } 1211 1212 #ifndef CONFIG_USER_ONLY 1213 static int64_t cycles_ns_per(uint64_t cycles) 1214 { 1215 return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles; 1216 } 1217 1218 static bool instructions_supported(CPUARMState *env) 1219 { 1220 return icount_enabled() == 1; /* Precise instruction counting */ 1221 } 1222 1223 static uint64_t instructions_get_count(CPUARMState *env) 1224 { 1225 return (uint64_t)icount_get_raw(); 1226 } 1227 1228 static int64_t instructions_ns_per(uint64_t icount) 1229 { 1230 return icount_to_ns((int64_t)icount); 1231 } 1232 #endif 1233 1234 static bool pmu_8_1_events_supported(CPUARMState *env) 1235 { 1236 /* For events which are supported in any v8.1 PMU */ 1237 return cpu_isar_feature(any_pmu_8_1, env_archcpu(env)); 1238 } 1239 1240 static bool pmu_8_4_events_supported(CPUARMState *env) 1241 { 1242 /* For events which are supported in any v8.1 PMU */ 1243 return cpu_isar_feature(any_pmu_8_4, env_archcpu(env)); 1244 } 1245 1246 static uint64_t zero_event_get_count(CPUARMState *env) 1247 { 1248 /* For events which on QEMU never fire, so their count is always zero */ 1249 return 0; 1250 } 1251 1252 static int64_t zero_event_ns_per(uint64_t cycles) 1253 { 1254 /* An event which never fires can never overflow */ 1255 return -1; 1256 } 1257 1258 static const pm_event pm_events[] = { 1259 { .number = 0x000, /* SW_INCR */ 1260 .supported = event_always_supported, 1261 .get_count = swinc_get_count, 1262 .ns_per_count = swinc_ns_per, 1263 }, 1264 #ifndef CONFIG_USER_ONLY 1265 { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */ 1266 .supported = instructions_supported, 1267 .get_count = instructions_get_count, 1268 .ns_per_count = instructions_ns_per, 1269 }, 1270 { .number = 0x011, /* CPU_CYCLES, Cycle */ 1271 .supported = event_always_supported, 1272 .get_count = cycles_get_count, 1273 .ns_per_count = cycles_ns_per, 1274 }, 1275 #endif 1276 { .number = 0x023, /* STALL_FRONTEND */ 1277 .supported = pmu_8_1_events_supported, 1278 .get_count = zero_event_get_count, 1279 .ns_per_count = zero_event_ns_per, 1280 }, 1281 { .number = 0x024, /* STALL_BACKEND */ 1282 .supported = pmu_8_1_events_supported, 1283 .get_count = zero_event_get_count, 1284 .ns_per_count = zero_event_ns_per, 1285 }, 1286 { .number = 0x03c, /* STALL */ 1287 .supported = pmu_8_4_events_supported, 1288 .get_count = zero_event_get_count, 1289 .ns_per_count = zero_event_ns_per, 1290 }, 1291 }; 1292 1293 /* 1294 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of 1295 * events (i.e. the statistical profiling extension), this implementation 1296 * should first be updated to something sparse instead of the current 1297 * supported_event_map[] array. 1298 */ 1299 #define MAX_EVENT_ID 0x3c 1300 #define UNSUPPORTED_EVENT UINT16_MAX 1301 static uint16_t supported_event_map[MAX_EVENT_ID + 1]; 1302 1303 /* 1304 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map 1305 * of ARM event numbers to indices in our pm_events array. 1306 * 1307 * Note: Events in the 0x40XX range are not currently supported. 
1308 */ 1309 void pmu_init(ARMCPU *cpu) 1310 { 1311 unsigned int i; 1312 1313 /* 1314 * Empty supported_event_map and cpu->pmceid[01] before adding supported 1315 * events to them 1316 */ 1317 for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) { 1318 supported_event_map[i] = UNSUPPORTED_EVENT; 1319 } 1320 cpu->pmceid0 = 0; 1321 cpu->pmceid1 = 0; 1322 1323 for (i = 0; i < ARRAY_SIZE(pm_events); i++) { 1324 const pm_event *cnt = &pm_events[i]; 1325 assert(cnt->number <= MAX_EVENT_ID); 1326 /* We do not currently support events in the 0x40xx range */ 1327 assert(cnt->number <= 0x3f); 1328 1329 if (cnt->supported(&cpu->env)) { 1330 supported_event_map[cnt->number] = i; 1331 uint64_t event_mask = 1ULL << (cnt->number & 0x1f); 1332 if (cnt->number & 0x20) { 1333 cpu->pmceid1 |= event_mask; 1334 } else { 1335 cpu->pmceid0 |= event_mask; 1336 } 1337 } 1338 } 1339 } 1340 1341 /* 1342 * Check at runtime whether a PMU event is supported for the current machine 1343 */ 1344 static bool event_supported(uint16_t number) 1345 { 1346 if (number > MAX_EVENT_ID) { 1347 return false; 1348 } 1349 return supported_event_map[number] != UNSUPPORTED_EVENT; 1350 } 1351 1352 static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri, 1353 bool isread) 1354 { 1355 /* Performance monitor registers user accessibility is controlled 1356 * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable 1357 * trapping to EL2 or EL3 for other accesses. 1358 */ 1359 int el = arm_current_el(env); 1360 uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); 1361 1362 if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) { 1363 return CP_ACCESS_TRAP; 1364 } 1365 if (el < 2 && (mdcr_el2 & MDCR_TPM)) { 1366 return CP_ACCESS_TRAP_EL2; 1367 } 1368 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { 1369 return CP_ACCESS_TRAP_EL3; 1370 } 1371 1372 return CP_ACCESS_OK; 1373 } 1374 1375 static CPAccessResult pmreg_access_xevcntr(CPUARMState *env, 1376 const ARMCPRegInfo *ri, 1377 bool isread) 1378 { 1379 /* ER: event counter read trap control */ 1380 if (arm_feature(env, ARM_FEATURE_V8) 1381 && arm_current_el(env) == 0 1382 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0 1383 && isread) { 1384 return CP_ACCESS_OK; 1385 } 1386 1387 return pmreg_access(env, ri, isread); 1388 } 1389 1390 static CPAccessResult pmreg_access_swinc(CPUARMState *env, 1391 const ARMCPRegInfo *ri, 1392 bool isread) 1393 { 1394 /* SW: software increment write trap control */ 1395 if (arm_feature(env, ARM_FEATURE_V8) 1396 && arm_current_el(env) == 0 1397 && (env->cp15.c9_pmuserenr & (1 << 1)) != 0 1398 && !isread) { 1399 return CP_ACCESS_OK; 1400 } 1401 1402 return pmreg_access(env, ri, isread); 1403 } 1404 1405 static CPAccessResult pmreg_access_selr(CPUARMState *env, 1406 const ARMCPRegInfo *ri, 1407 bool isread) 1408 { 1409 /* ER: event counter read trap control */ 1410 if (arm_feature(env, ARM_FEATURE_V8) 1411 && arm_current_el(env) == 0 1412 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) { 1413 return CP_ACCESS_OK; 1414 } 1415 1416 return pmreg_access(env, ri, isread); 1417 } 1418 1419 static CPAccessResult pmreg_access_ccntr(CPUARMState *env, 1420 const ARMCPRegInfo *ri, 1421 bool isread) 1422 { 1423 /* CR: cycle counter read trap control */ 1424 if (arm_feature(env, ARM_FEATURE_V8) 1425 && arm_current_el(env) == 0 1426 && (env->cp15.c9_pmuserenr & (1 << 2)) != 0 1427 && isread) { 1428 return CP_ACCESS_OK; 1429 } 1430 1431 return pmreg_access(env, ri, isread); 1432 } 1433 1434 /* Returns true if the counter (pass 31 for PMCCNTR) should count events using 1435 * the 

/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    uint8_t hpmn = mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
        (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    if (!secure) {
        if (el == 2 && (counter < hpmn || counter == 31)) {
            prohibited = mdcr_el2 & MDCR_HPMD;
        } else {
            prohibited = false;
        }
    } else {
        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
            !(env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (prohibited && counter == 31) {
        prohibited = env->cp15.c9_pmcr & PMCRDP;
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p   = filter & PMXEVTYPER_P;
    u   = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m   = arm_el_is_aa64(env, 1) &&
          arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}

static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
                1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}

/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}

static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
    uint64_t count = 0;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}

static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = UINT32_MAX -
            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}

void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}
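
/*
 * Note added for clarity (not in the original source): pmu_op_start() and
 * pmu_op_finish() bracket any window in which counter configuration may
 * change (register writes, EL changes, the timer callback below), so the
 * per-counter deltas are recomputed against a consistent guest-visible
 * count on either side of the change.
 */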

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
    env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK);

    pmu_op_finish(env);
}

static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}
1740 */ 1741 env->cp15.c9_pmselr = value & 0x1f; 1742 } 1743 1744 static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1745 uint64_t value) 1746 { 1747 pmccntr_op_start(env); 1748 env->cp15.c15_ccnt = value; 1749 pmccntr_op_finish(env); 1750 } 1751 1752 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri, 1753 uint64_t value) 1754 { 1755 uint64_t cur_val = pmccntr_read(env, NULL); 1756 1757 pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value)); 1758 } 1759 1760 static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1761 uint64_t value) 1762 { 1763 pmccntr_op_start(env); 1764 env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0; 1765 pmccntr_op_finish(env); 1766 } 1767 1768 static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri, 1769 uint64_t value) 1770 { 1771 pmccntr_op_start(env); 1772 /* M is not accessible from AArch32 */ 1773 env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) | 1774 (value & PMCCFILTR); 1775 pmccntr_op_finish(env); 1776 } 1777 1778 static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri) 1779 { 1780 /* M is not visible in AArch32 */ 1781 return env->cp15.pmccfiltr_el0 & PMCCFILTR; 1782 } 1783 1784 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri, 1785 uint64_t value) 1786 { 1787 value &= pmu_counter_mask(env); 1788 env->cp15.c9_pmcnten |= value; 1789 } 1790 1791 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1792 uint64_t value) 1793 { 1794 value &= pmu_counter_mask(env); 1795 env->cp15.c9_pmcnten &= ~value; 1796 } 1797 1798 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1799 uint64_t value) 1800 { 1801 value &= pmu_counter_mask(env); 1802 env->cp15.c9_pmovsr &= ~value; 1803 pmu_update_irq(env); 1804 } 1805 1806 static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri, 1807 uint64_t value) 1808 { 1809 value &= pmu_counter_mask(env); 1810 env->cp15.c9_pmovsr |= value; 1811 pmu_update_irq(env); 1812 } 1813 1814 static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri, 1815 uint64_t value, const uint8_t counter) 1816 { 1817 if (counter == 31) { 1818 pmccfiltr_write(env, ri, value); 1819 } else if (counter < pmu_num_counters(env)) { 1820 pmevcntr_op_start(env, counter); 1821 1822 /* 1823 * If this counter's event type is changing, store the current 1824 * underlying count for the new type in c14_pmevcntr_delta[counter] so 1825 * pmevcntr_op_finish has the correct baseline when it converts back to 1826 * a delta. 1827 */ 1828 uint16_t old_event = env->cp15.c14_pmevtyper[counter] & 1829 PMXEVTYPER_EVTCOUNT; 1830 uint16_t new_event = value & PMXEVTYPER_EVTCOUNT; 1831 if (old_event != new_event) { 1832 uint64_t count = 0; 1833 if (event_supported(new_event)) { 1834 uint16_t event_idx = supported_event_map[new_event]; 1835 count = pm_events[event_idx].get_count(env); 1836 } 1837 env->cp15.c14_pmevcntr_delta[counter] = count; 1838 } 1839 1840 env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK; 1841 pmevcntr_op_finish(env, counter); 1842 } 1843 /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when 1844 * PMSELR value is equal to or greater than the number of implemented 1845 * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI. 
     */
}

static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}

static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}

static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}

static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevtyper_read(env, ri, counter);
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
     */
}

static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        uint64_t ret;
        pmevcntr_op_start(env, counter);
        ret = env->cp15.c14_pmevcntr[counter];
        pmevcntr_op_finish(env, counter);
        return ret;
    } else {
        /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
         * are CONSTRAINED UNPREDICTABLE.
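         * The raw accessors used for migration (pmevcntr_rawread and
         * pmevcntr_rawwrite below) instead assert that the counter exists.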
         */
        return 0;
    }
}

static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevcntr_read(env, ri, counter);
}

static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    env->cp15.c14_pmevcntr[counter] = value;
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    return env->cp15.c14_pmevcntr[counter];
}

static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Bits corresponding to unimplemented counters are RAZ/WI */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
    pmu_update_irq(env);
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
    pmu_update_irq(env);
}

static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}

static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Begin with base v8.0 state. */
    uint32_t valid_mask = 0x3fff;
    ARMCPU *cpu = env_archcpu(env);

    if (ri->state == ARM_CP_STATE_AA64) {
        if (arm_feature(env, ARM_FEATURE_AARCH64) &&
            !cpu_isar_feature(aa64_aa32_el1, cpu)) {
            value |= SCR_FW | SCR_AW;   /* these two bits are RES1.
*/ 2031 } 2032 valid_mask &= ~SCR_NET; 2033 2034 if (cpu_isar_feature(aa64_lor, cpu)) { 2035 valid_mask |= SCR_TLOR; 2036 } 2037 if (cpu_isar_feature(aa64_pauth, cpu)) { 2038 valid_mask |= SCR_API | SCR_APK; 2039 } 2040 if (cpu_isar_feature(aa64_sel2, cpu)) { 2041 valid_mask |= SCR_EEL2; 2042 } 2043 if (cpu_isar_feature(aa64_mte, cpu)) { 2044 valid_mask |= SCR_ATA; 2045 } 2046 } else { 2047 valid_mask &= ~(SCR_RW | SCR_ST); 2048 } 2049 2050 if (!arm_feature(env, ARM_FEATURE_EL2)) { 2051 valid_mask &= ~SCR_HCE; 2052 2053 /* On ARMv7, SMD (or SCD as it is called in v7) is only 2054 * supported if EL2 exists. The bit is UNK/SBZP when 2055 * EL2 is unavailable. In QEMU ARMv7, we force it to always zero 2056 * when EL2 is unavailable. 2057 * On ARMv8, this bit is always available. 2058 */ 2059 if (arm_feature(env, ARM_FEATURE_V7) && 2060 !arm_feature(env, ARM_FEATURE_V8)) { 2061 valid_mask &= ~SCR_SMD; 2062 } 2063 } 2064 2065 /* Clear all-context RES0 bits. */ 2066 value &= valid_mask; 2067 raw_write(env, ri, value); 2068 } 2069 2070 static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2071 { 2072 /* 2073 * scr_write will set the RES1 bits on an AArch64-only CPU. 2074 * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise. 2075 */ 2076 scr_write(env, ri, 0); 2077 } 2078 2079 static CPAccessResult access_aa64_tid2(CPUARMState *env, 2080 const ARMCPRegInfo *ri, 2081 bool isread) 2082 { 2083 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) { 2084 return CP_ACCESS_TRAP_EL2; 2085 } 2086 2087 return CP_ACCESS_OK; 2088 } 2089 2090 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2091 { 2092 ARMCPU *cpu = env_archcpu(env); 2093 2094 /* Acquire the CSSELR index from the bank corresponding to the CCSIDR 2095 * bank 2096 */ 2097 uint32_t index = A32_BANKED_REG_GET(env, csselr, 2098 ri->secure & ARM_CP_SECSTATE_S); 2099 2100 return cpu->ccsidr[index]; 2101 } 2102 2103 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2104 uint64_t value) 2105 { 2106 raw_write(env, ri, value & 0xf); 2107 } 2108 2109 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2110 { 2111 CPUState *cs = env_cpu(env); 2112 bool el1 = arm_current_el(env) == 1; 2113 uint64_t hcr_el2 = el1 ? 
arm_hcr_el2_eff(env) : 0; 2114 uint64_t ret = 0; 2115 2116 if (hcr_el2 & HCR_IMO) { 2117 if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) { 2118 ret |= CPSR_I; 2119 } 2120 } else { 2121 if (cs->interrupt_request & CPU_INTERRUPT_HARD) { 2122 ret |= CPSR_I; 2123 } 2124 } 2125 2126 if (hcr_el2 & HCR_FMO) { 2127 if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) { 2128 ret |= CPSR_F; 2129 } 2130 } else { 2131 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) { 2132 ret |= CPSR_F; 2133 } 2134 } 2135 2136 /* External aborts are not possible in QEMU so A bit is always clear */ 2137 return ret; 2138 } 2139 2140 static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri, 2141 bool isread) 2142 { 2143 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) { 2144 return CP_ACCESS_TRAP_EL2; 2145 } 2146 2147 return CP_ACCESS_OK; 2148 } 2149 2150 static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri, 2151 bool isread) 2152 { 2153 if (arm_feature(env, ARM_FEATURE_V8)) { 2154 return access_aa64_tid1(env, ri, isread); 2155 } 2156 2157 return CP_ACCESS_OK; 2158 } 2159 2160 static const ARMCPRegInfo v7_cp_reginfo[] = { 2161 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */ 2162 { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, 2163 .access = PL1_W, .type = ARM_CP_NOP }, 2164 /* Performance monitors are implementation defined in v7, 2165 * but with an ARM recommended set of registers, which we 2166 * follow. 2167 * 2168 * Performance registers fall into three categories: 2169 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR) 2170 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR) 2171 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others) 2172 * For the cases controlled by PMUSERENR we must set .access to PL0_RW 2173 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn. 
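* (pmreg_access() and its pmreg_access_* variants are those helper functions.)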
2174 */ 2175 { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1, 2176 .access = PL0_RW, .type = ARM_CP_ALIAS, 2177 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), 2178 .writefn = pmcntenset_write, 2179 .accessfn = pmreg_access, 2180 .raw_writefn = raw_write }, 2181 { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, 2182 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1, 2183 .access = PL0_RW, .accessfn = pmreg_access, 2184 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0, 2185 .writefn = pmcntenset_write, .raw_writefn = raw_write }, 2186 { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2, 2187 .access = PL0_RW, 2188 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), 2189 .accessfn = pmreg_access, 2190 .writefn = pmcntenclr_write, 2191 .type = ARM_CP_ALIAS }, 2192 { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64, 2193 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2, 2194 .access = PL0_RW, .accessfn = pmreg_access, 2195 .type = ARM_CP_ALIAS, 2196 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), 2197 .writefn = pmcntenclr_write }, 2198 { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3, 2199 .access = PL0_RW, .type = ARM_CP_IO, 2200 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), 2201 .accessfn = pmreg_access, 2202 .writefn = pmovsr_write, 2203 .raw_writefn = raw_write }, 2204 { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64, 2205 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3, 2206 .access = PL0_RW, .accessfn = pmreg_access, 2207 .type = ARM_CP_ALIAS | ARM_CP_IO, 2208 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), 2209 .writefn = pmovsr_write, 2210 .raw_writefn = raw_write }, 2211 { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4, 2212 .access = PL0_W, .accessfn = pmreg_access_swinc, 2213 .type = ARM_CP_NO_RAW | ARM_CP_IO, 2214 .writefn = pmswinc_write }, 2215 { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64, 2216 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4, 2217 .access = PL0_W, .accessfn = pmreg_access_swinc, 2218 .type = ARM_CP_NO_RAW | ARM_CP_IO, 2219 .writefn = pmswinc_write }, 2220 { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5, 2221 .access = PL0_RW, .type = ARM_CP_ALIAS, 2222 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr), 2223 .accessfn = pmreg_access_selr, .writefn = pmselr_write, 2224 .raw_writefn = raw_write}, 2225 { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64, 2226 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5, 2227 .access = PL0_RW, .accessfn = pmreg_access_selr, 2228 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr), 2229 .writefn = pmselr_write, .raw_writefn = raw_write, }, 2230 { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0, 2231 .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO, 2232 .readfn = pmccntr_read, .writefn = pmccntr_write32, 2233 .accessfn = pmreg_access_ccntr }, 2234 { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64, 2235 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0, 2236 .access = PL0_RW, .accessfn = pmreg_access_ccntr, 2237 .type = ARM_CP_IO, 2238 .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt), 2239 .readfn = pmccntr_read, .writefn = pmccntr_write, 2240 .raw_readfn = raw_read, .raw_writefn = raw_write, }, 2241 { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7, 2242 .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32, 
2243 .access = PL0_RW, .accessfn = pmreg_access, 2244 .type = ARM_CP_ALIAS | ARM_CP_IO, 2245 .resetvalue = 0, }, 2246 { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64, 2247 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7, 2248 .writefn = pmccfiltr_write, .raw_writefn = raw_write, 2249 .access = PL0_RW, .accessfn = pmreg_access, 2250 .type = ARM_CP_IO, 2251 .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0), 2252 .resetvalue = 0, }, 2253 { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1, 2254 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2255 .accessfn = pmreg_access, 2256 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, 2257 { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64, 2258 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1, 2259 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2260 .accessfn = pmreg_access, 2261 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, 2262 { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2, 2263 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2264 .accessfn = pmreg_access_xevcntr, 2265 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, 2266 { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64, 2267 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2, 2268 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2269 .accessfn = pmreg_access_xevcntr, 2270 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, 2271 { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0, 2272 .access = PL0_R | PL1_RW, .accessfn = access_tpm, 2273 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr), 2274 .resetvalue = 0, 2275 .writefn = pmuserenr_write, .raw_writefn = raw_write }, 2276 { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64, 2277 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0, 2278 .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS, 2279 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr), 2280 .resetvalue = 0, 2281 .writefn = pmuserenr_write, .raw_writefn = raw_write }, 2282 { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1, 2283 .access = PL1_RW, .accessfn = access_tpm, 2284 .type = ARM_CP_ALIAS | ARM_CP_IO, 2285 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten), 2286 .resetvalue = 0, 2287 .writefn = pmintenset_write, .raw_writefn = raw_write }, 2288 { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64, 2289 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1, 2290 .access = PL1_RW, .accessfn = access_tpm, 2291 .type = ARM_CP_IO, 2292 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2293 .writefn = pmintenset_write, .raw_writefn = raw_write, 2294 .resetvalue = 0x0 }, 2295 { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2, 2296 .access = PL1_RW, .accessfn = access_tpm, 2297 .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW, 2298 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2299 .writefn = pmintenclr_write, }, 2300 { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64, 2301 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2, 2302 .access = PL1_RW, .accessfn = access_tpm, 2303 .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW, 2304 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2305 .writefn = pmintenclr_write }, 2306 { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH, 2307 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0, 2308 .access = PL1_R, 2309 .accessfn = access_aa64_tid2, 2310 .readfn = ccsidr_read, 
.type = ARM_CP_NO_RAW }, 2311 { .name = "CSSELR", .state = ARM_CP_STATE_BOTH, 2312 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0, 2313 .access = PL1_RW, 2314 .accessfn = access_aa64_tid2, 2315 .writefn = csselr_write, .resetvalue = 0, 2316 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s), 2317 offsetof(CPUARMState, cp15.csselr_ns) } }, 2318 /* Auxiliary ID register: this actually has an IMPDEF value but for now 2319 * just RAZ for all cores: 2320 */ 2321 { .name = "AIDR", .state = ARM_CP_STATE_BOTH, 2322 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7, 2323 .access = PL1_R, .type = ARM_CP_CONST, 2324 .accessfn = access_aa64_tid1, 2325 .resetvalue = 0 }, 2326 /* Auxiliary fault status registers: these also are IMPDEF, and we 2327 * choose to RAZ/WI for all cores. 2328 */ 2329 { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH, 2330 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0, 2331 .access = PL1_RW, .accessfn = access_tvm_trvm, 2332 .type = ARM_CP_CONST, .resetvalue = 0 }, 2333 { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH, 2334 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1, 2335 .access = PL1_RW, .accessfn = access_tvm_trvm, 2336 .type = ARM_CP_CONST, .resetvalue = 0 }, 2337 /* MAIR can just read-as-written because we don't implement caches 2338 * and so don't need to care about memory attributes. 2339 */ 2340 { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64, 2341 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, 2342 .access = PL1_RW, .accessfn = access_tvm_trvm, 2343 .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]), 2344 .resetvalue = 0 }, 2345 { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64, 2346 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0, 2347 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]), 2348 .resetvalue = 0 }, 2349 /* For non-long-descriptor page tables these are PRRR and NMRR; 2350 * regardless they still act as reads-as-written for QEMU. 2351 */ 2352 /* MAIR0/1 are defined separately from their 64-bit counterpart which 2353 * allows them to assign the correct fieldoffset based on the endianness 2354 * handled in the field definitions. 
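(They are AArch32-only entries with banked Secure/Non-secure fieldoffsets, while MAIR_EL1 above provides the AArch64 view.)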
2355 */ 2356 { .name = "MAIR0", .state = ARM_CP_STATE_AA32, 2357 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, 2358 .access = PL1_RW, .accessfn = access_tvm_trvm, 2359 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s), 2360 offsetof(CPUARMState, cp15.mair0_ns) }, 2361 .resetfn = arm_cp_reset_ignore }, 2362 { .name = "MAIR1", .state = ARM_CP_STATE_AA32, 2363 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, 2364 .access = PL1_RW, .accessfn = access_tvm_trvm, 2365 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s), 2366 offsetof(CPUARMState, cp15.mair1_ns) }, 2367 .resetfn = arm_cp_reset_ignore }, 2368 { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH, 2369 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0, 2370 .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read }, 2371 /* 32 bit ITLB invalidates */ 2372 { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0, 2373 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2374 .writefn = tlbiall_write }, 2375 { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1, 2376 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2377 .writefn = tlbimva_write }, 2378 { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2, 2379 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2380 .writefn = tlbiasid_write }, 2381 /* 32 bit DTLB invalidates */ 2382 { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0, 2383 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2384 .writefn = tlbiall_write }, 2385 { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1, 2386 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2387 .writefn = tlbimva_write }, 2388 { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2, 2389 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2390 .writefn = tlbiasid_write }, 2391 /* 32 bit TLB invalidates */ 2392 { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, 2393 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2394 .writefn = tlbiall_write }, 2395 { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, 2396 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2397 .writefn = tlbimva_write }, 2398 { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, 2399 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2400 .writefn = tlbiasid_write }, 2401 { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, 2402 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2403 .writefn = tlbimvaa_write }, 2404 REGINFO_SENTINEL 2405 }; 2406 2407 static const ARMCPRegInfo v7mp_cp_reginfo[] = { 2408 /* 32 bit TLB invalidates, Inner Shareable */ 2409 { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, 2410 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2411 .writefn = tlbiall_is_write }, 2412 { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, 2413 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2414 .writefn = tlbimva_is_write }, 2415 { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, 2416 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2417 .writefn = tlbiasid_is_write }, 2418 { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, 2419 .type = ARM_CP_NO_RAW, .access 
= PL1_W, .accessfn = access_ttlb, 2420 .writefn = tlbimvaa_is_write }, 2421 REGINFO_SENTINEL 2422 }; 2423 2424 static const ARMCPRegInfo pmovsset_cp_reginfo[] = { 2425 /* PMOVSSET is not implemented in v7 before v7ve */ 2426 { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3, 2427 .access = PL0_RW, .accessfn = pmreg_access, 2428 .type = ARM_CP_ALIAS | ARM_CP_IO, 2429 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), 2430 .writefn = pmovsset_write, 2431 .raw_writefn = raw_write }, 2432 { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64, 2433 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3, 2434 .access = PL0_RW, .accessfn = pmreg_access, 2435 .type = ARM_CP_ALIAS | ARM_CP_IO, 2436 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), 2437 .writefn = pmovsset_write, 2438 .raw_writefn = raw_write }, 2439 REGINFO_SENTINEL 2440 }; 2441 2442 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2443 uint64_t value) 2444 { 2445 value &= 1; 2446 env->teecr = value; 2447 } 2448 2449 static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri, 2450 bool isread) 2451 { 2452 /* 2453 * HSTR.TTEE only exists in v7A, not v8A, but v8A doesn't have T2EE 2454 * at all, so we don't need to check whether we're v8A. 2455 */ 2456 if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) && 2457 (env->cp15.hstr_el2 & HSTR_TTEE)) { 2458 return CP_ACCESS_TRAP_EL2; 2459 } 2460 return CP_ACCESS_OK; 2461 } 2462 2463 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri, 2464 bool isread) 2465 { 2466 if (arm_current_el(env) == 0 && (env->teecr & 1)) { 2467 return CP_ACCESS_TRAP; 2468 } 2469 return teecr_access(env, ri, isread); 2470 } 2471 2472 static const ARMCPRegInfo t2ee_cp_reginfo[] = { 2473 { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0, 2474 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr), 2475 .resetvalue = 0, 2476 .writefn = teecr_write, .accessfn = teecr_access }, 2477 { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0, 2478 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr), 2479 .accessfn = teehbr_access, .resetvalue = 0 }, 2480 REGINFO_SENTINEL 2481 }; 2482 2483 static const ARMCPRegInfo v6k_cp_reginfo[] = { 2484 { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64, 2485 .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0, 2486 .access = PL0_RW, 2487 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 }, 2488 { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2, 2489 .access = PL0_RW, 2490 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s), 2491 offsetoflow32(CPUARMState, cp15.tpidrurw_ns) }, 2492 .resetfn = arm_cp_reset_ignore }, 2493 { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64, 2494 .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0, 2495 .access = PL0_R|PL1_W, 2496 .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]), 2497 .resetvalue = 0}, 2498 { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3, 2499 .access = PL0_R|PL1_W, 2500 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s), 2501 offsetoflow32(CPUARMState, cp15.tpidruro_ns) }, 2502 .resetfn = arm_cp_reset_ignore }, 2503 { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64, 2504 .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0, 2505 .access = PL1_RW, 2506 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 }, 2507 { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 
13, .crm = 0, .opc2 = 4, 2508 .access = PL1_RW, 2509 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s), 2510 offsetoflow32(CPUARMState, cp15.tpidrprw_ns) }, 2511 .resetvalue = 0 }, 2512 REGINFO_SENTINEL 2513 }; 2514 2515 #ifndef CONFIG_USER_ONLY 2516 2517 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri, 2518 bool isread) 2519 { 2520 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero. 2521 * Writable only at the highest implemented exception level. 2522 */ 2523 int el = arm_current_el(env); 2524 uint64_t hcr; 2525 uint32_t cntkctl; 2526 2527 switch (el) { 2528 case 0: 2529 hcr = arm_hcr_el2_eff(env); 2530 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2531 cntkctl = env->cp15.cnthctl_el2; 2532 } else { 2533 cntkctl = env->cp15.c14_cntkctl; 2534 } 2535 if (!extract32(cntkctl, 0, 2)) { 2536 return CP_ACCESS_TRAP; 2537 } 2538 break; 2539 case 1: 2540 if (!isread && ri->state == ARM_CP_STATE_AA32 && 2541 arm_is_secure_below_el3(env)) { 2542 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */ 2543 return CP_ACCESS_TRAP_UNCATEGORIZED; 2544 } 2545 break; 2546 case 2: 2547 case 3: 2548 break; 2549 } 2550 2551 if (!isread && el < arm_highest_el(env)) { 2552 return CP_ACCESS_TRAP_UNCATEGORIZED; 2553 } 2554 2555 return CP_ACCESS_OK; 2556 } 2557 2558 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx, 2559 bool isread) 2560 { 2561 unsigned int cur_el = arm_current_el(env); 2562 bool has_el2 = arm_is_el2_enabled(env); 2563 uint64_t hcr = arm_hcr_el2_eff(env); 2564 2565 switch (cur_el) { 2566 case 0: 2567 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */ 2568 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2569 return (extract32(env->cp15.cnthctl_el2, timeridx, 1) 2570 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2); 2571 } 2572 2573 /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */ 2574 if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) { 2575 return CP_ACCESS_TRAP; 2576 } 2577 2578 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */ 2579 if (hcr & HCR_E2H) { 2580 if (timeridx == GTIMER_PHYS && 2581 !extract32(env->cp15.cnthctl_el2, 10, 1)) { 2582 return CP_ACCESS_TRAP_EL2; 2583 } 2584 } else { 2585 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */ 2586 if (has_el2 && timeridx == GTIMER_PHYS && 2587 !extract32(env->cp15.cnthctl_el2, 1, 1)) { 2588 return CP_ACCESS_TRAP_EL2; 2589 } 2590 } 2591 break; 2592 2593 case 1: 2594 /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */ 2595 if (has_el2 && timeridx == GTIMER_PHYS && 2596 (hcr & HCR_E2H 2597 ? !extract32(env->cp15.cnthctl_el2, 10, 1) 2598 : !extract32(env->cp15.cnthctl_el2, 0, 1))) { 2599 return CP_ACCESS_TRAP_EL2; 2600 } 2601 break; 2602 } 2603 return CP_ACCESS_OK; 2604 } 2605 2606 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx, 2607 bool isread) 2608 { 2609 unsigned int cur_el = arm_current_el(env); 2610 bool has_el2 = arm_is_el2_enabled(env); 2611 uint64_t hcr = arm_hcr_el2_eff(env); 2612 2613 switch (cur_el) { 2614 case 0: 2615 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2616 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */ 2617 return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1) 2618 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2); 2619 } 2620 2621 /* 2622 * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from 2623 * EL0 if EL0[PV]TEN is zero. 
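* (EL0PTEN is CNTKCTL bit 9 and EL0VTEN is bit 8; with GTIMER_PHYS == 0 and GTIMER_VIRT == 1, that is the 9 - timeridx bit index used below.)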
2624 */ 2625 if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) { 2626 return CP_ACCESS_TRAP; 2627 } 2628 /* fall through */ 2629 2630 case 1: 2631 if (has_el2 && timeridx == GTIMER_PHYS) { 2632 if (hcr & HCR_E2H) { 2633 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */ 2634 if (!extract32(env->cp15.cnthctl_el2, 11, 1)) { 2635 return CP_ACCESS_TRAP_EL2; 2636 } 2637 } else { 2638 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */ 2639 if (!extract32(env->cp15.cnthctl_el2, 1, 1)) { 2640 return CP_ACCESS_TRAP_EL2; 2641 } 2642 } 2643 } 2644 break; 2645 } 2646 return CP_ACCESS_OK; 2647 } 2648 2649 static CPAccessResult gt_pct_access(CPUARMState *env, 2650 const ARMCPRegInfo *ri, 2651 bool isread) 2652 { 2653 return gt_counter_access(env, GTIMER_PHYS, isread); 2654 } 2655 2656 static CPAccessResult gt_vct_access(CPUARMState *env, 2657 const ARMCPRegInfo *ri, 2658 bool isread) 2659 { 2660 return gt_counter_access(env, GTIMER_VIRT, isread); 2661 } 2662 2663 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri, 2664 bool isread) 2665 { 2666 return gt_timer_access(env, GTIMER_PHYS, isread); 2667 } 2668 2669 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri, 2670 bool isread) 2671 { 2672 return gt_timer_access(env, GTIMER_VIRT, isread); 2673 } 2674 2675 static CPAccessResult gt_stimer_access(CPUARMState *env, 2676 const ARMCPRegInfo *ri, 2677 bool isread) 2678 { 2679 /* The AArch64 register view of the secure physical timer is 2680 * always accessible from EL3, and configurably accessible from 2681 * Secure EL1. 2682 */ 2683 switch (arm_current_el(env)) { 2684 case 1: 2685 if (!arm_is_secure(env)) { 2686 return CP_ACCESS_TRAP; 2687 } 2688 if (!(env->cp15.scr_el3 & SCR_ST)) { 2689 return CP_ACCESS_TRAP_EL3; 2690 } 2691 return CP_ACCESS_OK; 2692 case 0: 2693 case 2: 2694 return CP_ACCESS_TRAP; 2695 case 3: 2696 return CP_ACCESS_OK; 2697 default: 2698 g_assert_not_reached(); 2699 } 2700 } 2701 2702 static uint64_t gt_get_countervalue(CPUARMState *env) 2703 { 2704 ARMCPU *cpu = env_archcpu(env); 2705 2706 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu); 2707 } 2708 2709 static void gt_recalc_timer(ARMCPU *cpu, int timeridx) 2710 { 2711 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx]; 2712 2713 if (gt->ctl & 1) { 2714 /* Timer enabled: calculate and set current ISTATUS, irq, and 2715 * reset timer to when ISTATUS next has to change 2716 */ 2717 uint64_t offset = timeridx == GTIMER_VIRT ? 2718 cpu->env.cp15.cntvoff_el2 : 0; 2719 uint64_t count = gt_get_countervalue(&cpu->env); 2720 /* Note that this must be unsigned 64 bit arithmetic: */ 2721 int istatus = count - offset >= gt->cval; 2722 uint64_t nexttick; 2723 int irqstate; 2724 2725 gt->ctl = deposit32(gt->ctl, 2, 1, istatus); 2726 2727 irqstate = (istatus && !(gt->ctl & 2)); 2728 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); 2729 2730 if (istatus) { 2731 /* Next transition is when count rolls back over to zero */ 2732 nexttick = UINT64_MAX; 2733 } else { 2734 /* Next transition is when we hit cval */ 2735 nexttick = gt->cval + offset; 2736 } 2737 /* Note that the desired next expiry time might be beyond the 2738 * signed-64-bit range of a QEMUTimer -- in this case we just 2739 * set the timer for as far in the future as possible. When the 2740 * timer expires we will reset the timer for any remaining period. 
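* (gt_cntfrq_period_ns() is the tick period in nanoseconds, so the comparison below checks whether nexttick converted to nanoseconds would overflow an int64_t deadline.)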
2741 */ 2742 if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) { 2743 timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX); 2744 } else { 2745 timer_mod(cpu->gt_timer[timeridx], nexttick); 2746 } 2747 trace_arm_gt_recalc(timeridx, irqstate, nexttick); 2748 } else { 2749 /* Timer disabled: ISTATUS and timer output always clear */ 2750 gt->ctl &= ~4; 2751 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0); 2752 timer_del(cpu->gt_timer[timeridx]); 2753 trace_arm_gt_recalc_disabled(timeridx); 2754 } 2755 } 2756 2757 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri, 2758 int timeridx) 2759 { 2760 ARMCPU *cpu = env_archcpu(env); 2761 2762 timer_del(cpu->gt_timer[timeridx]); 2763 } 2764 2765 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 2766 { 2767 return gt_get_countervalue(env); 2768 } 2769 2770 static uint64_t gt_virt_cnt_offset(CPUARMState *env) 2771 { 2772 uint64_t hcr; 2773 2774 switch (arm_current_el(env)) { 2775 case 2: 2776 hcr = arm_hcr_el2_eff(env); 2777 if (hcr & HCR_E2H) { 2778 return 0; 2779 } 2780 break; 2781 case 0: 2782 hcr = arm_hcr_el2_eff(env); 2783 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2784 return 0; 2785 } 2786 break; 2787 } 2788 2789 return env->cp15.cntvoff_el2; 2790 } 2791 2792 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 2793 { 2794 return gt_get_countervalue(env) - gt_virt_cnt_offset(env); 2795 } 2796 2797 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2798 int timeridx, 2799 uint64_t value) 2800 { 2801 trace_arm_gt_cval_write(timeridx, value); 2802 env->cp15.c14_timer[timeridx].cval = value; 2803 gt_recalc_timer(env_archcpu(env), timeridx); 2804 } 2805 2806 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri, 2807 int timeridx) 2808 { 2809 uint64_t offset = 0; 2810 2811 switch (timeridx) { 2812 case GTIMER_VIRT: 2813 case GTIMER_HYPVIRT: 2814 offset = gt_virt_cnt_offset(env); 2815 break; 2816 } 2817 2818 return (uint32_t)(env->cp15.c14_timer[timeridx].cval - 2819 (gt_get_countervalue(env) - offset)); 2820 } 2821 2822 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2823 int timeridx, 2824 uint64_t value) 2825 { 2826 uint64_t offset = 0; 2827 2828 switch (timeridx) { 2829 case GTIMER_VIRT: 2830 case GTIMER_HYPVIRT: 2831 offset = gt_virt_cnt_offset(env); 2832 break; 2833 } 2834 2835 trace_arm_gt_tval_write(timeridx, value); 2836 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset + 2837 sextract64(value, 0, 32); 2838 gt_recalc_timer(env_archcpu(env), timeridx); 2839 } 2840 2841 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2842 int timeridx, 2843 uint64_t value) 2844 { 2845 ARMCPU *cpu = env_archcpu(env); 2846 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; 2847 2848 trace_arm_gt_ctl_write(timeridx, value); 2849 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value); 2850 if ((oldval ^ value) & 1) { 2851 /* Enable toggled */ 2852 gt_recalc_timer(cpu, timeridx); 2853 } else if ((oldval ^ value) & 2) { 2854 /* IMASK toggled: don't need to recalculate, 2855 * just set the interrupt line based on ISTATUS 2856 */ 2857 int irqstate = (oldval & 4) && !(value & 2); 2858 2859 trace_arm_gt_imask_toggle(timeridx, irqstate); 2860 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); 2861 } 2862 } 2863 2864 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2865 { 2866 gt_timer_reset(env, ri, GTIMER_PHYS); 2867 } 2868 2869 static void 
gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2870 uint64_t value) 2871 { 2872 gt_cval_write(env, ri, GTIMER_PHYS, value); 2873 } 2874 2875 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2876 { 2877 return gt_tval_read(env, ri, GTIMER_PHYS); 2878 } 2879 2880 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2881 uint64_t value) 2882 { 2883 gt_tval_write(env, ri, GTIMER_PHYS, value); 2884 } 2885 2886 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2887 uint64_t value) 2888 { 2889 gt_ctl_write(env, ri, GTIMER_PHYS, value); 2890 } 2891 2892 static int gt_phys_redir_timeridx(CPUARMState *env) 2893 { 2894 switch (arm_mmu_idx(env)) { 2895 case ARMMMUIdx_E20_0: 2896 case ARMMMUIdx_E20_2: 2897 case ARMMMUIdx_E20_2_PAN: 2898 case ARMMMUIdx_SE20_0: 2899 case ARMMMUIdx_SE20_2: 2900 case ARMMMUIdx_SE20_2_PAN: 2901 return GTIMER_HYP; 2902 default: 2903 return GTIMER_PHYS; 2904 } 2905 } 2906 2907 static int gt_virt_redir_timeridx(CPUARMState *env) 2908 { 2909 switch (arm_mmu_idx(env)) { 2910 case ARMMMUIdx_E20_0: 2911 case ARMMMUIdx_E20_2: 2912 case ARMMMUIdx_E20_2_PAN: 2913 case ARMMMUIdx_SE20_0: 2914 case ARMMMUIdx_SE20_2: 2915 case ARMMMUIdx_SE20_2_PAN: 2916 return GTIMER_HYPVIRT; 2917 default: 2918 return GTIMER_VIRT; 2919 } 2920 } 2921 2922 static uint64_t gt_phys_redir_cval_read(CPUARMState *env, 2923 const ARMCPRegInfo *ri) 2924 { 2925 int timeridx = gt_phys_redir_timeridx(env); 2926 return env->cp15.c14_timer[timeridx].cval; 2927 } 2928 2929 static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2930 uint64_t value) 2931 { 2932 int timeridx = gt_phys_redir_timeridx(env); 2933 gt_cval_write(env, ri, timeridx, value); 2934 } 2935 2936 static uint64_t gt_phys_redir_tval_read(CPUARMState *env, 2937 const ARMCPRegInfo *ri) 2938 { 2939 int timeridx = gt_phys_redir_timeridx(env); 2940 return gt_tval_read(env, ri, timeridx); 2941 } 2942 2943 static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2944 uint64_t value) 2945 { 2946 int timeridx = gt_phys_redir_timeridx(env); 2947 gt_tval_write(env, ri, timeridx, value); 2948 } 2949 2950 static uint64_t gt_phys_redir_ctl_read(CPUARMState *env, 2951 const ARMCPRegInfo *ri) 2952 { 2953 int timeridx = gt_phys_redir_timeridx(env); 2954 return env->cp15.c14_timer[timeridx].ctl; 2955 } 2956 2957 static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2958 uint64_t value) 2959 { 2960 int timeridx = gt_phys_redir_timeridx(env); 2961 gt_ctl_write(env, ri, timeridx, value); 2962 } 2963 2964 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2965 { 2966 gt_timer_reset(env, ri, GTIMER_VIRT); 2967 } 2968 2969 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2970 uint64_t value) 2971 { 2972 gt_cval_write(env, ri, GTIMER_VIRT, value); 2973 } 2974 2975 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2976 { 2977 return gt_tval_read(env, ri, GTIMER_VIRT); 2978 } 2979 2980 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2981 uint64_t value) 2982 { 2983 gt_tval_write(env, ri, GTIMER_VIRT, value); 2984 } 2985 2986 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2987 uint64_t value) 2988 { 2989 gt_ctl_write(env, ri, GTIMER_VIRT, value); 2990 } 2991 2992 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri, 2993 uint64_t value) 2994 { 2995 ARMCPU *cpu = env_archcpu(env); 2996 2997 
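    /* A new CNTVOFF_EL2 shifts the virtual counter, so the virtual timer's
     * deadline must be recalculated against the new offset.
     */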
trace_arm_gt_cntvoff_write(value); 2998 raw_write(env, ri, value); 2999 gt_recalc_timer(cpu, GTIMER_VIRT); 3000 } 3001 3002 static uint64_t gt_virt_redir_cval_read(CPUARMState *env, 3003 const ARMCPRegInfo *ri) 3004 { 3005 int timeridx = gt_virt_redir_timeridx(env); 3006 return env->cp15.c14_timer[timeridx].cval; 3007 } 3008 3009 static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 3010 uint64_t value) 3011 { 3012 int timeridx = gt_virt_redir_timeridx(env); 3013 gt_cval_write(env, ri, timeridx, value); 3014 } 3015 3016 static uint64_t gt_virt_redir_tval_read(CPUARMState *env, 3017 const ARMCPRegInfo *ri) 3018 { 3019 int timeridx = gt_virt_redir_timeridx(env); 3020 return gt_tval_read(env, ri, timeridx); 3021 } 3022 3023 static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 3024 uint64_t value) 3025 { 3026 int timeridx = gt_virt_redir_timeridx(env); 3027 gt_tval_write(env, ri, timeridx, value); 3028 } 3029 3030 static uint64_t gt_virt_redir_ctl_read(CPUARMState *env, 3031 const ARMCPRegInfo *ri) 3032 { 3033 int timeridx = gt_virt_redir_timeridx(env); 3034 return env->cp15.c14_timer[timeridx].ctl; 3035 } 3036 3037 static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 3038 uint64_t value) 3039 { 3040 int timeridx = gt_virt_redir_timeridx(env); 3041 gt_ctl_write(env, ri, timeridx, value); 3042 } 3043 3044 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 3045 { 3046 gt_timer_reset(env, ri, GTIMER_HYP); 3047 } 3048 3049 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 3050 uint64_t value) 3051 { 3052 gt_cval_write(env, ri, GTIMER_HYP, value); 3053 } 3054 3055 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 3056 { 3057 return gt_tval_read(env, ri, GTIMER_HYP); 3058 } 3059 3060 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 3061 uint64_t value) 3062 { 3063 gt_tval_write(env, ri, GTIMER_HYP, value); 3064 } 3065 3066 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 3067 uint64_t value) 3068 { 3069 gt_ctl_write(env, ri, GTIMER_HYP, value); 3070 } 3071 3072 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 3073 { 3074 gt_timer_reset(env, ri, GTIMER_SEC); 3075 } 3076 3077 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 3078 uint64_t value) 3079 { 3080 gt_cval_write(env, ri, GTIMER_SEC, value); 3081 } 3082 3083 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 3084 { 3085 return gt_tval_read(env, ri, GTIMER_SEC); 3086 } 3087 3088 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 3089 uint64_t value) 3090 { 3091 gt_tval_write(env, ri, GTIMER_SEC, value); 3092 } 3093 3094 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 3095 uint64_t value) 3096 { 3097 gt_ctl_write(env, ri, GTIMER_SEC, value); 3098 } 3099 3100 static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 3101 { 3102 gt_timer_reset(env, ri, GTIMER_HYPVIRT); 3103 } 3104 3105 static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 3106 uint64_t value) 3107 { 3108 gt_cval_write(env, ri, GTIMER_HYPVIRT, value); 3109 } 3110 3111 static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 3112 { 3113 return gt_tval_read(env, ri, GTIMER_HYPVIRT); 3114 } 3115 3116 static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 3117 uint64_t value) 3118 { 3119 gt_tval_write(env, ri, 
GTIMER_HYPVIRT, value); 3120 } 3121 3122 static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 3123 uint64_t value) 3124 { 3125 gt_ctl_write(env, ri, GTIMER_HYPVIRT, value); 3126 } 3127 3128 void arm_gt_ptimer_cb(void *opaque) 3129 { 3130 ARMCPU *cpu = opaque; 3131 3132 gt_recalc_timer(cpu, GTIMER_PHYS); 3133 } 3134 3135 void arm_gt_vtimer_cb(void *opaque) 3136 { 3137 ARMCPU *cpu = opaque; 3138 3139 gt_recalc_timer(cpu, GTIMER_VIRT); 3140 } 3141 3142 void arm_gt_htimer_cb(void *opaque) 3143 { 3144 ARMCPU *cpu = opaque; 3145 3146 gt_recalc_timer(cpu, GTIMER_HYP); 3147 } 3148 3149 void arm_gt_stimer_cb(void *opaque) 3150 { 3151 ARMCPU *cpu = opaque; 3152 3153 gt_recalc_timer(cpu, GTIMER_SEC); 3154 } 3155 3156 void arm_gt_hvtimer_cb(void *opaque) 3157 { 3158 ARMCPU *cpu = opaque; 3159 3160 gt_recalc_timer(cpu, GTIMER_HYPVIRT); 3161 } 3162 3163 static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque) 3164 { 3165 ARMCPU *cpu = env_archcpu(env); 3166 3167 cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz; 3168 } 3169 3170 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 3171 /* Note that CNTFRQ is purely reads-as-written for the benefit 3172 * of software; writing it doesn't actually change the timer frequency. 3173 * Our reset value matches the fixed frequency we implement the timer at. 3174 */ 3175 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0, 3176 .type = ARM_CP_ALIAS, 3177 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 3178 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq), 3179 }, 3180 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, 3181 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, 3182 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 3183 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), 3184 .resetfn = arm_gt_cntfrq_reset, 3185 }, 3186 /* overall control: mostly access permissions */ 3187 { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH, 3188 .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0, 3189 .access = PL1_RW, 3190 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl), 3191 .resetvalue = 0, 3192 }, 3193 /* per-timer control */ 3194 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 3195 .secure = ARM_CP_SECSTATE_NS, 3196 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, 3197 .accessfn = gt_ptimer_access, 3198 .fieldoffset = offsetoflow32(CPUARMState, 3199 cp15.c14_timer[GTIMER_PHYS].ctl), 3200 .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read, 3201 .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write, 3202 }, 3203 { .name = "CNTP_CTL_S", 3204 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 3205 .secure = ARM_CP_SECSTATE_S, 3206 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, 3207 .accessfn = gt_ptimer_access, 3208 .fieldoffset = offsetoflow32(CPUARMState, 3209 cp15.c14_timer[GTIMER_SEC].ctl), 3210 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 3211 }, 3212 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64, 3213 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1, 3214 .type = ARM_CP_IO, .access = PL0_RW, 3215 .accessfn = gt_ptimer_access, 3216 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), 3217 .resetvalue = 0, 3218 .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read, 3219 .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write, 3220 }, 3221 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1, 3222 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, 3223 
.accessfn = gt_vtimer_access, 3224 .fieldoffset = offsetoflow32(CPUARMState, 3225 cp15.c14_timer[GTIMER_VIRT].ctl), 3226 .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read, 3227 .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write, 3228 }, 3229 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64, 3230 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1, 3231 .type = ARM_CP_IO, .access = PL0_RW, 3232 .accessfn = gt_vtimer_access, 3233 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), 3234 .resetvalue = 0, 3235 .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read, 3236 .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write, 3237 }, 3238 /* TimerValue views: a 32 bit downcounting view of the underlying state */ 3239 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 3240 .secure = ARM_CP_SECSTATE_NS, 3241 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3242 .accessfn = gt_ptimer_access, 3243 .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write, 3244 }, 3245 { .name = "CNTP_TVAL_S", 3246 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 3247 .secure = ARM_CP_SECSTATE_S, 3248 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3249 .accessfn = gt_ptimer_access, 3250 .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write, 3251 }, 3252 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64, 3253 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0, 3254 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3255 .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset, 3256 .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write, 3257 }, 3258 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0, 3259 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3260 .accessfn = gt_vtimer_access, 3261 .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write, 3262 }, 3263 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64, 3264 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0, 3265 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3266 .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset, 3267 .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write, 3268 }, 3269 /* The counter itself */ 3270 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0, 3271 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 3272 .accessfn = gt_pct_access, 3273 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore, 3274 }, 3275 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64, 3276 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1, 3277 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 3278 .accessfn = gt_pct_access, .readfn = gt_cnt_read, 3279 }, 3280 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1, 3281 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 3282 .accessfn = gt_vct_access, 3283 .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore, 3284 }, 3285 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, 3286 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, 3287 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 3288 .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read, 3289 }, 3290 /* Comparison value, indicating when the timer goes off */ 3291 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2, 3292 .secure = ARM_CP_SECSTATE_NS, 3293 .access = PL0_RW, 3294 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 3295 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 3296 
.accessfn = gt_ptimer_access, 3297 .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read, 3298 .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write, 3299 }, 3300 { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2, 3301 .secure = ARM_CP_SECSTATE_S, 3302 .access = PL0_RW, 3303 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 3304 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), 3305 .accessfn = gt_ptimer_access, 3306 .writefn = gt_sec_cval_write, .raw_writefn = raw_write, 3307 }, 3308 { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64, 3309 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2, 3310 .access = PL0_RW, 3311 .type = ARM_CP_IO, 3312 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 3313 .resetvalue = 0, .accessfn = gt_ptimer_access, 3314 .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read, 3315 .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write, 3316 }, 3317 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3, 3318 .access = PL0_RW, 3319 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 3320 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 3321 .accessfn = gt_vtimer_access, 3322 .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read, 3323 .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write, 3324 }, 3325 { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64, 3326 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2, 3327 .access = PL0_RW, 3328 .type = ARM_CP_IO, 3329 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 3330 .resetvalue = 0, .accessfn = gt_vtimer_access, 3331 .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read, 3332 .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write, 3333 }, 3334 /* Secure timer -- this is actually restricted to only EL3 3335 * and configurably Secure-EL1 via the accessfn. 
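* (gt_stimer_access() above implements exactly that restriction.)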
3336 */ 3337 { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64, 3338 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0, 3339 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW, 3340 .accessfn = gt_stimer_access, 3341 .readfn = gt_sec_tval_read, 3342 .writefn = gt_sec_tval_write, 3343 .resetfn = gt_sec_timer_reset, 3344 }, 3345 { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64, 3346 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1, 3347 .type = ARM_CP_IO, .access = PL1_RW, 3348 .accessfn = gt_stimer_access, 3349 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl), 3350 .resetvalue = 0, 3351 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 3352 }, 3353 { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64, 3354 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2, 3355 .type = ARM_CP_IO, .access = PL1_RW, 3356 .accessfn = gt_stimer_access, 3357 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), 3358 .writefn = gt_sec_cval_write, .raw_writefn = raw_write, 3359 }, 3360 REGINFO_SENTINEL 3361 }; 3362 3363 static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri, 3364 bool isread) 3365 { 3366 if (!(arm_hcr_el2_eff(env) & HCR_E2H)) { 3367 return CP_ACCESS_TRAP; 3368 } 3369 return CP_ACCESS_OK; 3370 } 3371 3372 #else 3373 3374 /* In user-mode most of the generic timer registers are inaccessible 3375 * however modern kernels (4.12+) allow access to cntvct_el0 3376 */ 3377 3378 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 3379 { 3380 ARMCPU *cpu = env_archcpu(env); 3381 3382 /* Currently we have no support for QEMUTimer in linux-user so we 3383 * can't call gt_get_countervalue(env), instead we directly 3384 * call the lower level functions. 3385 */ 3386 return cpu_get_clock() / gt_cntfrq_period_ns(cpu); 3387 } 3388 3389 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 3390 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, 3391 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, 3392 .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */, 3393 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), 3394 .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE, 3395 }, 3396 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, 3397 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, 3398 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 3399 .readfn = gt_virt_cnt_read, 3400 }, 3401 REGINFO_SENTINEL 3402 }; 3403 3404 #endif 3405 3406 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 3407 { 3408 if (arm_feature(env, ARM_FEATURE_LPAE)) { 3409 raw_write(env, ri, value); 3410 } else if (arm_feature(env, ARM_FEATURE_V7)) { 3411 raw_write(env, ri, value & 0xfffff6ff); 3412 } else { 3413 raw_write(env, ri, value & 0xfffff1ff); 3414 } 3415 } 3416 3417 #ifndef CONFIG_USER_ONLY 3418 /* get_phys_addr() isn't present for user-mode-only targets */ 3419 3420 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri, 3421 bool isread) 3422 { 3423 if (ri->opc2 & 4) { 3424 /* The ATS12NSO* operations must trap to EL3 or EL2 if executed in 3425 * Secure EL1 (which can only happen if EL3 is AArch64). 3426 * They are simply UNDEF if executed from NS EL1. 3427 * They function normally from EL2 or EL3. 
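* When Secure EL2 is enabled via SCR_EL3.EEL2, the Secure EL1 case is taken to EL2 rather than EL3, as the code below implements.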
3428 */ 3429 if (arm_current_el(env) == 1) { 3430 if (arm_is_secure_below_el3(env)) { 3431 if (env->cp15.scr_el3 & SCR_EEL2) { 3432 return CP_ACCESS_TRAP_UNCATEGORIZED_EL2; 3433 } 3434 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3; 3435 } 3436 return CP_ACCESS_TRAP_UNCATEGORIZED; 3437 } 3438 } 3439 return CP_ACCESS_OK; 3440 } 3441 3442 #ifdef CONFIG_TCG 3443 static uint64_t do_ats_write(CPUARMState *env, uint64_t value, 3444 MMUAccessType access_type, ARMMMUIdx mmu_idx) 3445 { 3446 hwaddr phys_addr; 3447 target_ulong page_size; 3448 int prot; 3449 bool ret; 3450 uint64_t par64; 3451 bool format64 = false; 3452 MemTxAttrs attrs = {}; 3453 ARMMMUFaultInfo fi = {}; 3454 ARMCacheAttrs cacheattrs = {}; 3455 3456 ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs, 3457 &prot, &page_size, &fi, &cacheattrs); 3458 3459 if (ret) { 3460 /* 3461 * Some kinds of translation fault must cause exceptions rather 3462 * than being reported in the PAR. 3463 */ 3464 int current_el = arm_current_el(env); 3465 int target_el; 3466 uint32_t syn, fsr, fsc; 3467 bool take_exc = false; 3468 3469 if (fi.s1ptw && current_el == 1 3470 && arm_mmu_idx_is_stage1_of_2(mmu_idx)) { 3471 /* 3472 * Synchronous stage 2 fault on an access made as part of the 3473 * translation table walk for AT S1E0* or AT S1E1* insn 3474 * executed from NS EL1. If this is a synchronous external abort 3475 * and SCR_EL3.EA == 1, then we take a synchronous external abort 3476 * to EL3. Otherwise the fault is taken as an exception to EL2, 3477 * and HPFAR_EL2 holds the faulting IPA. 3478 */ 3479 if (fi.type == ARMFault_SyncExternalOnWalk && 3480 (env->cp15.scr_el3 & SCR_EA)) { 3481 target_el = 3; 3482 } else { 3483 env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4; 3484 if (arm_is_secure_below_el3(env) && fi.s1ns) { 3485 env->cp15.hpfar_el2 |= HPFAR_NS; 3486 } 3487 target_el = 2; 3488 } 3489 take_exc = true; 3490 } else if (fi.type == ARMFault_SyncExternalOnWalk) { 3491 /* 3492 * Synchronous external aborts during a translation table walk 3493 * are taken as Data Abort exceptions. 3494 */ 3495 if (fi.stage2) { 3496 if (current_el == 3) { 3497 target_el = 3; 3498 } else { 3499 target_el = 2; 3500 } 3501 } else { 3502 target_el = exception_target_el(env); 3503 } 3504 take_exc = true; 3505 } 3506 3507 if (take_exc) { 3508 /* Construct FSR and FSC using same logic as arm_deliver_fault() */ 3509 if (target_el == 2 || arm_el_is_aa64(env, target_el) || 3510 arm_s1_regime_using_lpae_format(env, mmu_idx)) { 3511 fsr = arm_fi_to_lfsc(&fi); 3512 fsc = extract32(fsr, 0, 6); 3513 } else { 3514 fsr = arm_fi_to_sfsc(&fi); 3515 fsc = 0x3f; 3516 } 3517 /* 3518 * Report exception with ESR indicating a fault due to a 3519 * translation table walk for a cache maintenance instruction. 
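* syn_data_abort_no_iss() builds the syndrome with ISV clear, since no instruction syndrome information is available for this generated abort.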
3520 */ 3521 syn = syn_data_abort_no_iss(current_el == target_el, 0, 3522 fi.ea, 1, fi.s1ptw, 1, fsc); 3523 env->exception.vaddress = value; 3524 env->exception.fsr = fsr; 3525 raise_exception(env, EXCP_DATA_ABORT, syn, target_el); 3526 } 3527 } 3528 3529 if (is_a64(env)) { 3530 format64 = true; 3531 } else if (arm_feature(env, ARM_FEATURE_LPAE)) { 3532 /* 3533 * ATS1Cxx: 3534 * * TTBCR.EAE determines whether the result is returned using the 3535 * 32-bit or the 64-bit PAR format 3536 * * Instructions executed in Hyp mode always use the 64bit format 3537 * 3538 * ATS1S2NSOxx uses the 64bit format if any of the following is true: 3539 * * The Non-secure TTBCR.EAE bit is set to 1 3540 * * The implementation includes EL2, and the value of HCR.VM is 1 3541 * 3542 * (Note that HCR.DC makes HCR.VM behave as if it is 1.) 3543 * 3544 * ATS1Hx always uses the 64bit format. 3545 */ 3546 format64 = arm_s1_regime_using_lpae_format(env, mmu_idx); 3547 3548 if (arm_feature(env, ARM_FEATURE_EL2)) { 3549 if (mmu_idx == ARMMMUIdx_E10_0 || 3550 mmu_idx == ARMMMUIdx_E10_1 || 3551 mmu_idx == ARMMMUIdx_E10_1_PAN) { 3552 format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC); 3553 } else { 3554 format64 |= arm_current_el(env) == 2; 3555 } 3556 } 3557 } 3558 3559 if (format64) { 3560 /* Create a 64-bit PAR */ 3561 par64 = (1 << 11); /* LPAE bit always set */ 3562 if (!ret) { 3563 par64 |= phys_addr & ~0xfffULL; 3564 if (!attrs.secure) { 3565 par64 |= (1 << 9); /* NS */ 3566 } 3567 par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */ 3568 par64 |= cacheattrs.shareability << 7; /* SH */ 3569 } else { 3570 uint32_t fsr = arm_fi_to_lfsc(&fi); 3571 3572 par64 |= 1; /* F */ 3573 par64 |= (fsr & 0x3f) << 1; /* FS */ 3574 if (fi.stage2) { 3575 par64 |= (1 << 9); /* S */ 3576 } 3577 if (fi.s1ptw) { 3578 par64 |= (1 << 8); /* PTW */ 3579 } 3580 } 3581 } else { 3582 /* fsr is a DFSR/IFSR value for the short descriptor 3583 * translation table format (with WnR always clear). 3584 * Convert it to a 32-bit PAR. 3585 */ 3586 if (!ret) { 3587 /* We do not set any attribute bits in the PAR */ 3588 if (page_size == (1 << 24) 3589 && arm_feature(env, ARM_FEATURE_V7)) { 3590 par64 = (phys_addr & 0xff000000) | (1 << 1); 3591 } else { 3592 par64 = phys_addr & 0xfffff000; 3593 } 3594 if (!attrs.secure) { 3595 par64 |= (1 << 9); /* NS */ 3596 } 3597 } else { 3598 uint32_t fsr = arm_fi_to_sfsc(&fi); 3599 3600 par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) | 3601 ((fsr & 0xf) << 1) | 1; 3602 } 3603 } 3604 return par64; 3605 } 3606 #endif /* CONFIG_TCG */ 3607 3608 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 3609 { 3610 #ifdef CONFIG_TCG 3611 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 3612 uint64_t par64; 3613 ARMMMUIdx mmu_idx; 3614 int el = arm_current_el(env); 3615 bool secure = arm_is_secure_below_el3(env); 3616 3617 switch (ri->opc2 & 6) { 3618 case 0: 3619 /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */ 3620 switch (el) { 3621 case 3: 3622 mmu_idx = ARMMMUIdx_SE3; 3623 break; 3624 case 2: 3625 g_assert(!secure); /* ARMv8.4-SecEL2 is 64-bit only */ 3626 /* fall through */ 3627 case 1: 3628 if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) { 3629 mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN 3630 : ARMMMUIdx_Stage1_E1_PAN); 3631 } else { 3632 mmu_idx = secure ? 
ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1; 3633 } 3634 break; 3635 default: 3636 g_assert_not_reached(); 3637 } 3638 break; 3639 case 2: 3640 /* stage 1 current state PL0: ATS1CUR, ATS1CUW */ 3641 switch (el) { 3642 case 3: 3643 mmu_idx = ARMMMUIdx_SE10_0; 3644 break; 3645 case 2: 3646 g_assert(!secure); /* ARMv8.4-SecEL2 is 64-bit only */ 3647 mmu_idx = ARMMMUIdx_Stage1_E0; 3648 break; 3649 case 1: 3650 mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0; 3651 break; 3652 default: 3653 g_assert_not_reached(); 3654 } 3655 break; 3656 case 4: 3657 /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */ 3658 mmu_idx = ARMMMUIdx_E10_1; 3659 break; 3660 case 6: 3661 /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */ 3662 mmu_idx = ARMMMUIdx_E10_0; 3663 break; 3664 default: 3665 g_assert_not_reached(); 3666 } 3667 3668 par64 = do_ats_write(env, value, access_type, mmu_idx); 3669 3670 A32_BANKED_CURRENT_REG_SET(env, par, par64); 3671 #else 3672 /* Handled by hardware accelerator. */ 3673 g_assert_not_reached(); 3674 #endif /* CONFIG_TCG */ 3675 } 3676 3677 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri, 3678 uint64_t value) 3679 { 3680 #ifdef CONFIG_TCG 3681 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 3682 uint64_t par64; 3683 3684 par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2); 3685 3686 A32_BANKED_CURRENT_REG_SET(env, par, par64); 3687 #else 3688 /* Handled by hardware accelerator. */ 3689 g_assert_not_reached(); 3690 #endif /* CONFIG_TCG */ 3691 } 3692 3693 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri, 3694 bool isread) 3695 { 3696 if (arm_current_el(env) == 3 && 3697 !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) { 3698 return CP_ACCESS_TRAP; 3699 } 3700 return CP_ACCESS_OK; 3701 } 3702 3703 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, 3704 uint64_t value) 3705 { 3706 #ifdef CONFIG_TCG 3707 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 3708 ARMMMUIdx mmu_idx; 3709 int secure = arm_is_secure_below_el3(env); 3710 3711 switch (ri->opc2 & 6) { 3712 case 0: 3713 switch (ri->opc1) { 3714 case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */ 3715 if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) { 3716 mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN 3717 : ARMMMUIdx_Stage1_E1_PAN); 3718 } else { 3719 mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1; 3720 } 3721 break; 3722 case 4: /* AT S1E2R, AT S1E2W */ 3723 mmu_idx = secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2; 3724 break; 3725 case 6: /* AT S1E3R, AT S1E3W */ 3726 mmu_idx = ARMMMUIdx_SE3; 3727 break; 3728 default: 3729 g_assert_not_reached(); 3730 } 3731 break; 3732 case 2: /* AT S1E0R, AT S1E0W */ 3733 mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0; 3734 break; 3735 case 4: /* AT S12E1R, AT S12E1W */ 3736 mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1; 3737 break; 3738 case 6: /* AT S12E0R, AT S12E0W */ 3739 mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0; 3740 break; 3741 default: 3742 g_assert_not_reached(); 3743 } 3744 3745 env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx); 3746 #else 3747 /* Handled by hardware accelerator. 
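* Without TCG the guest runs under KVM or another accelerator, which
* performs the address translation itself, so this writefn should never
* be reached.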
*/ 3748 g_assert_not_reached(); 3749 #endif /* CONFIG_TCG */ 3750 } 3751 #endif 3752 3753 static const ARMCPRegInfo vapa_cp_reginfo[] = { 3754 { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0, 3755 .access = PL1_RW, .resetvalue = 0, 3756 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s), 3757 offsetoflow32(CPUARMState, cp15.par_ns) }, 3758 .writefn = par_write }, 3759 #ifndef CONFIG_USER_ONLY 3760 /* This underdecoding is safe because the reginfo is NO_RAW. */ 3761 { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY, 3762 .access = PL1_W, .accessfn = ats_access, 3763 .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, 3764 #endif 3765 REGINFO_SENTINEL 3766 }; 3767 3768 /* Return basic MPU access permission bits. */ 3769 static uint32_t simple_mpu_ap_bits(uint32_t val) 3770 { 3771 uint32_t ret; 3772 uint32_t mask; 3773 int i; 3774 ret = 0; 3775 mask = 3; 3776 for (i = 0; i < 16; i += 2) { 3777 ret |= (val >> i) & mask; 3778 mask <<= 2; 3779 } 3780 return ret; 3781 } 3782 3783 /* Pad basic MPU access permission bits to extended format. */ 3784 static uint32_t extended_mpu_ap_bits(uint32_t val) 3785 { 3786 uint32_t ret; 3787 uint32_t mask; 3788 int i; 3789 ret = 0; 3790 mask = 3; 3791 for (i = 0; i < 16; i += 2) { 3792 ret |= (val & mask) << i; 3793 mask <<= 2; 3794 } 3795 return ret; 3796 } 3797 3798 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 3799 uint64_t value) 3800 { 3801 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value); 3802 } 3803 3804 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 3805 { 3806 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap); 3807 } 3808 3809 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 3810 uint64_t value) 3811 { 3812 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value); 3813 } 3814 3815 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 3816 { 3817 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap); 3818 } 3819 3820 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri) 3821 { 3822 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 3823 3824 if (!u32p) { 3825 return 0; 3826 } 3827 3828 u32p += env->pmsav7.rnr[M_REG_NS]; 3829 return *u32p; 3830 } 3831 3832 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri, 3833 uint64_t value) 3834 { 3835 ARMCPU *cpu = env_archcpu(env); 3836 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 3837 3838 if (!u32p) { 3839 return; 3840 } 3841 3842 u32p += env->pmsav7.rnr[M_REG_NS]; 3843 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ 3844 *u32p = value; 3845 } 3846 3847 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3848 uint64_t value) 3849 { 3850 ARMCPU *cpu = env_archcpu(env); 3851 uint32_t nrgs = cpu->pmsav7_dregion; 3852 3853 if (value >= nrgs) { 3854 qemu_log_mask(LOG_GUEST_ERROR, 3855 "PMSAv7 RGNR write >= # supported regions, %" PRIu32 3856 " > %" PRIu32 "\n", (uint32_t)value, nrgs); 3857 return; 3858 } 3859 3860 raw_write(env, ri, value); 3861 } 3862 3863 static const ARMCPRegInfo pmsav7_cp_reginfo[] = { 3864 /* Reset for all these registers is handled in arm_cpu_reset(), 3865 * because the PMSAv7 is also used by M-profile CPUs, which do 3866 * not register cpregs but still need the state to be reset. 
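* (Hence the arm_cp_reset_ignore resetfn on the entries below.)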
3867 */ 3868 { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0, 3869 .access = PL1_RW, .type = ARM_CP_NO_RAW, 3870 .fieldoffset = offsetof(CPUARMState, pmsav7.drbar), 3871 .readfn = pmsav7_read, .writefn = pmsav7_write, 3872 .resetfn = arm_cp_reset_ignore }, 3873 { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2, 3874 .access = PL1_RW, .type = ARM_CP_NO_RAW, 3875 .fieldoffset = offsetof(CPUARMState, pmsav7.drsr), 3876 .readfn = pmsav7_read, .writefn = pmsav7_write, 3877 .resetfn = arm_cp_reset_ignore }, 3878 { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4, 3879 .access = PL1_RW, .type = ARM_CP_NO_RAW, 3880 .fieldoffset = offsetof(CPUARMState, pmsav7.dracr), 3881 .readfn = pmsav7_read, .writefn = pmsav7_write, 3882 .resetfn = arm_cp_reset_ignore }, 3883 { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0, 3884 .access = PL1_RW, 3885 .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]), 3886 .writefn = pmsav7_rgnr_write, 3887 .resetfn = arm_cp_reset_ignore }, 3888 REGINFO_SENTINEL 3889 }; 3890 3891 static const ARMCPRegInfo pmsav5_cp_reginfo[] = { 3892 { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 3893 .access = PL1_RW, .type = ARM_CP_ALIAS, 3894 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 3895 .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, }, 3896 { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 3897 .access = PL1_RW, .type = ARM_CP_ALIAS, 3898 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 3899 .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, }, 3900 { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2, 3901 .access = PL1_RW, 3902 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 3903 .resetvalue = 0, }, 3904 { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3, 3905 .access = PL1_RW, 3906 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 3907 .resetvalue = 0, }, 3908 { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 3909 .access = PL1_RW, 3910 .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, }, 3911 { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1, 3912 .access = PL1_RW, 3913 .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, }, 3914 /* Protection region base and size registers */ 3915 { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, 3916 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3917 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) }, 3918 { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0, 3919 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3920 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) }, 3921 { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0, 3922 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3923 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) }, 3924 { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0, 3925 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3926 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) }, 3927 { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0, 3928 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3929 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) }, 3930 { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0, 3931 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3932 .fieldoffset 
= offsetof(CPUARMState, cp15.c6_region[5]) }, 3933 { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0, 3934 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3935 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) }, 3936 { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0, 3937 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3938 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) }, 3939 REGINFO_SENTINEL 3940 }; 3941 3942 static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri, 3943 uint64_t value) 3944 { 3945 TCR *tcr = raw_ptr(env, ri); 3946 int maskshift = extract32(value, 0, 3); 3947 3948 if (!arm_feature(env, ARM_FEATURE_V8)) { 3949 if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) { 3950 /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when 3951 * using Long-descriptor translation table format */ 3952 value &= ~((7 << 19) | (3 << 14) | (0xf << 3)); 3953 } else if (arm_feature(env, ARM_FEATURE_EL3)) { 3954 /* In an implementation that includes the Security Extensions 3955 * TTBCR has additional fields PD0 [4] and PD1 [5] for 3956 * Short-descriptor translation table format. 3957 */ 3958 value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N; 3959 } else { 3960 value &= TTBCR_N; 3961 } 3962 } 3963 3964 /* Update the masks corresponding to the TCR bank being written. 3965 * Note that we always calculate mask and base_mask, but 3966 * they are only used for short-descriptor tables (i.e. if EAE is 0); 3967 * for long-descriptor tables the TCR fields are used differently 3968 * and the mask and base_mask values are meaningless. 3969 */ 3970 tcr->raw_tcr = value; 3971 tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift); 3972 tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift); 3973 } 3974 3975 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3976 uint64_t value) 3977 { 3978 ARMCPU *cpu = env_archcpu(env); 3979 TCR *tcr = raw_ptr(env, ri); 3980 3981 if (arm_feature(env, ARM_FEATURE_LPAE)) { 3982 /* With LPAE the TTBCR could result in a change of ASID 3983 * via the TTBCR.A1 bit, so do a TLB flush. 3984 */ 3985 tlb_flush(CPU(cpu)); 3986 } 3987 /* Preserve the high half of TCR_EL1, set via TTBCR2. */ 3988 value = deposit64(tcr->raw_tcr, 0, 32, value); 3989 vmsa_ttbcr_raw_write(env, ri, value); 3990 } 3991 3992 static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri) 3993 { 3994 TCR *tcr = raw_ptr(env, ri); 3995 3996 /* Reset both the TCR and the masks corresponding to the bank of 3997 * the TCR being reset. 3998 */ 3999 tcr->raw_tcr = 0; 4000 tcr->mask = 0; 4001 tcr->base_mask = 0xffffc000u; 4002 } 4003 4004 static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri, 4005 uint64_t value) 4006 { 4007 ARMCPU *cpu = env_archcpu(env); 4008 TCR *tcr = raw_ptr(env, ri); 4009 4010 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */ 4011 tlb_flush(CPU(cpu)); 4012 tcr->raw_tcr = value; 4013 } 4014 4015 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4016 uint64_t value) 4017 { 4018 /* If the ASID changes (with a 64-bit write), we must flush the TLB. */ 4019 if (cpreg_field_is_64bit(ri) && 4020 extract64(raw_read(env, ri) ^ value, 48, 16) != 0) { 4021 ARMCPU *cpu = env_archcpu(env); 4022 tlb_flush(CPU(cpu)); 4023 } 4024 raw_write(env, ri, value); 4025 } 4026 4027 static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, 4028 uint64_t value) 4029 { 4030 /* 4031 * If we are running with E2&0 regime, then an ASID is active.
4032 * Flush if that might be changing. Note we're not checking 4033 * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that 4034 * holds the active ASID, only checking the field that might. 4035 */ 4036 if (extract64(raw_read(env, ri) ^ value, 48, 16) && 4037 (arm_hcr_el2_eff(env) & HCR_E2H)) { 4038 uint16_t mask = ARMMMUIdxBit_E20_2 | 4039 ARMMMUIdxBit_E20_2_PAN | 4040 ARMMMUIdxBit_E20_0; 4041 4042 if (arm_is_secure_below_el3(env)) { 4043 mask >>= ARM_MMU_IDX_A_NS; 4044 } 4045 4046 tlb_flush_by_mmuidx(env_cpu(env), mask); 4047 } 4048 raw_write(env, ri, value); 4049 } 4050 4051 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4052 uint64_t value) 4053 { 4054 ARMCPU *cpu = env_archcpu(env); 4055 CPUState *cs = CPU(cpu); 4056 4057 /* 4058 * A change in VMID to the stage2 page table (Stage2) invalidates 4059 * the combined stage 1&2 tlbs (EL10_1 and EL10_0). 4060 */ 4061 if (raw_read(env, ri) != value) { 4062 uint16_t mask = ARMMMUIdxBit_E10_1 | 4063 ARMMMUIdxBit_E10_1_PAN | 4064 ARMMMUIdxBit_E10_0; 4065 4066 if (arm_is_secure_below_el3(env)) { 4067 mask >>= ARM_MMU_IDX_A_NS; 4068 } 4069 4070 tlb_flush_by_mmuidx(cs, mask); 4071 raw_write(env, ri, value); 4072 } 4073 } 4074 4075 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = { 4076 { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 4077 .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS, 4078 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s), 4079 offsetoflow32(CPUARMState, cp15.dfsr_ns) }, }, 4080 { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 4081 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, 4082 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s), 4083 offsetoflow32(CPUARMState, cp15.ifsr_ns) } }, 4084 { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0, 4085 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, 4086 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s), 4087 offsetof(CPUARMState, cp15.dfar_ns) } }, 4088 { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64, 4089 .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0, 4090 .access = PL1_RW, .accessfn = access_tvm_trvm, 4091 .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]), 4092 .resetvalue = 0, }, 4093 REGINFO_SENTINEL 4094 }; 4095 4096 static const ARMCPRegInfo vmsa_cp_reginfo[] = { 4097 { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64, 4098 .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0, 4099 .access = PL1_RW, .accessfn = access_tvm_trvm, 4100 .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, }, 4101 { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH, 4102 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0, 4103 .access = PL1_RW, .accessfn = access_tvm_trvm, 4104 .writefn = vmsa_ttbr_write, .resetvalue = 0, 4105 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 4106 offsetof(CPUARMState, cp15.ttbr0_ns) } }, 4107 { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH, 4108 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1, 4109 .access = PL1_RW, .accessfn = access_tvm_trvm, 4110 .writefn = vmsa_ttbr_write, .resetvalue = 0, 4111 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 4112 offsetof(CPUARMState, cp15.ttbr1_ns) } }, 4113 { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64, 4114 .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 4115 .access = PL1_RW, .accessfn = access_tvm_trvm, 4116 .writefn = vmsa_tcr_el12_write, 4117 .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write, 
4118 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) }, 4119 { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 4120 .access = PL1_RW, .accessfn = access_tvm_trvm, 4121 .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write, 4122 .raw_writefn = vmsa_ttbcr_raw_write, 4123 /* No offsetoflow32 -- pass the entire TCR to writefn/raw_writefn. */ 4124 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.tcr_el[3]), 4125 offsetof(CPUARMState, cp15.tcr_el[1])} }, 4126 REGINFO_SENTINEL 4127 }; 4128 4129 /* Note that unlike TTBCR, writing to TTBCR2 does not require flushing 4130 * qemu tlbs nor adjusting cached masks. 4131 */ 4132 static const ARMCPRegInfo ttbcr2_reginfo = { 4133 .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3, 4134 .access = PL1_RW, .accessfn = access_tvm_trvm, 4135 .type = ARM_CP_ALIAS, 4136 .bank_fieldoffsets = { 4137 offsetofhigh32(CPUARMState, cp15.tcr_el[3].raw_tcr), 4138 offsetofhigh32(CPUARMState, cp15.tcr_el[1].raw_tcr), 4139 }, 4140 }; 4141 4142 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri, 4143 uint64_t value) 4144 { 4145 env->cp15.c15_ticonfig = value & 0xe7; 4146 /* The OS_TYPE bit in this register changes the reported CPUID! */ 4147 env->cp15.c0_cpuid = (value & (1 << 5)) ? 4148 ARM_CPUID_TI915T : ARM_CPUID_TI925T; 4149 } 4150 4151 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri, 4152 uint64_t value) 4153 { 4154 env->cp15.c15_threadid = value & 0xffff; 4155 } 4156 4157 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri, 4158 uint64_t value) 4159 { 4160 /* Wait-for-interrupt (deprecated) */ 4161 cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT); 4162 } 4163 4164 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri, 4165 uint64_t value) 4166 { 4167 /* On OMAP there are registers indicating the max/min index of dcache lines 4168 * containing a dirty line; cache flush operations have to reset these. 
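* QEMU has no real cache, so we simply put them back to their reset
* values (max index 0x000, min index 0xff0).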
4169 */ 4170 env->cp15.c15_i_max = 0x000; 4171 env->cp15.c15_i_min = 0xff0; 4172 } 4173 4174 static const ARMCPRegInfo omap_cp_reginfo[] = { 4175 { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY, 4176 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE, 4177 .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]), 4178 .resetvalue = 0, }, 4179 { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0, 4180 .access = PL1_RW, .type = ARM_CP_NOP }, 4181 { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, 4182 .access = PL1_RW, 4183 .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0, 4184 .writefn = omap_ticonfig_write }, 4185 { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0, 4186 .access = PL1_RW, 4187 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, }, 4188 { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0, 4189 .access = PL1_RW, .resetvalue = 0xff0, 4190 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) }, 4191 { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0, 4192 .access = PL1_RW, 4193 .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0, 4194 .writefn = omap_threadid_write }, 4195 { .name = "TI925T_STATUS", .cp = 15, .crn = 15, 4196 .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 4197 .type = ARM_CP_NO_RAW, 4198 .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, }, 4199 /* TODO: Peripheral port remap register: 4200 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller 4201 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff), 4202 * when MMU is off. 4203 */ 4204 { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, 4205 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, 4206 .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW, 4207 .writefn = omap_cachemaint_write }, 4208 { .name = "C9", .cp = 15, .crn = 9, 4209 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, 4210 .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 }, 4211 REGINFO_SENTINEL 4212 }; 4213 4214 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri, 4215 uint64_t value) 4216 { 4217 env->cp15.c15_cpar = value & 0x3fff; 4218 } 4219 4220 static const ARMCPRegInfo xscale_cp_reginfo[] = { 4221 { .name = "XSCALE_CPAR", 4222 .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 4223 .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0, 4224 .writefn = xscale_cpar_write, }, 4225 { .name = "XSCALE_AUXCR", 4226 .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, 4227 .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr), 4228 .resetvalue = 0, }, 4229 /* XScale specific cache-lockdown: since we have no cache we NOP these 4230 * and hope the guest does not really rely on cache behaviour. 
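* (All four lockdown operations below are registered as ARM_CP_NOP.)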
4231 */ 4232 { .name = "XSCALE_LOCK_ICACHE_LINE", 4233 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0, 4234 .access = PL1_W, .type = ARM_CP_NOP }, 4235 { .name = "XSCALE_UNLOCK_ICACHE", 4236 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1, 4237 .access = PL1_W, .type = ARM_CP_NOP }, 4238 { .name = "XSCALE_DCACHE_LOCK", 4239 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0, 4240 .access = PL1_RW, .type = ARM_CP_NOP }, 4241 { .name = "XSCALE_UNLOCK_DCACHE", 4242 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1, 4243 .access = PL1_W, .type = ARM_CP_NOP }, 4244 REGINFO_SENTINEL 4245 }; 4246 4247 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = { 4248 /* RAZ/WI the whole crn=15 space, when we don't have a more specific 4249 * implementation of this implementation-defined space. 4250 * Ideally this should eventually disappear in favour of actually 4251 * implementing the correct behaviour for all cores. 4252 */ 4253 { .name = "C15_IMPDEF", .cp = 15, .crn = 15, 4254 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, 4255 .access = PL1_RW, 4256 .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE, 4257 .resetvalue = 0 }, 4258 REGINFO_SENTINEL 4259 }; 4260 4261 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = { 4262 /* Cache status: RAZ because we have no cache so it's always clean */ 4263 { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6, 4264 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 4265 .resetvalue = 0 }, 4266 REGINFO_SENTINEL 4267 }; 4268 4269 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = { 4270 /* We never have a block transfer operation in progress */ 4271 { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4, 4272 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 4273 .resetvalue = 0 }, 4274 /* The cache ops themselves: these are all NOPs for QEMU */ 4275 { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0, 4276 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 4277 { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0, 4278 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 4279 { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0, 4280 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 4281 { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1, 4282 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 4283 { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2, 4284 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 4285 { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0, 4286 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 4287 REGINFO_SENTINEL 4288 }; 4289 4290 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = { 4291 /* The cache test-and-clean instructions always return (1 << 30) 4292 * to indicate that there are no dirty cache lines.
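* When the result is read with Rd == r15 the value's bit 30 lands in the
* Z flag, so a guest's test-and-clean loop terminates on its first pass.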
4293 */ 4294 { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3, 4295 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 4296 .resetvalue = (1 << 30) }, 4297 { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3, 4298 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 4299 .resetvalue = (1 << 30) }, 4300 REGINFO_SENTINEL 4301 }; 4302 4303 static const ARMCPRegInfo strongarm_cp_reginfo[] = { 4304 /* Ignore ReadBuffer accesses */ 4305 { .name = "C9_READBUFFER", .cp = 15, .crn = 9, 4306 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, 4307 .access = PL1_RW, .resetvalue = 0, 4308 .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW }, 4309 REGINFO_SENTINEL 4310 }; 4311 4312 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri) 4313 { 4314 unsigned int cur_el = arm_current_el(env); 4315 4316 if (arm_is_el2_enabled(env) && cur_el == 1) { 4317 return env->cp15.vpidr_el2; 4318 } 4319 return raw_read(env, ri); 4320 } 4321 4322 static uint64_t mpidr_read_val(CPUARMState *env) 4323 { 4324 ARMCPU *cpu = env_archcpu(env); 4325 uint64_t mpidr = cpu->mp_affinity; 4326 4327 if (arm_feature(env, ARM_FEATURE_V7MP)) { 4328 mpidr |= (1U << 31); 4329 /* Cores which are uniprocessor (non-coherent) 4330 * but still implement the MP extensions set 4331 * bit 30. (For instance, Cortex-R5). 4332 */ 4333 if (cpu->mp_is_up) { 4334 mpidr |= (1u << 30); 4335 } 4336 } 4337 return mpidr; 4338 } 4339 4340 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 4341 { 4342 unsigned int cur_el = arm_current_el(env); 4343 4344 if (arm_is_el2_enabled(env) && cur_el == 1) { 4345 return env->cp15.vmpidr_el2; 4346 } 4347 return mpidr_read_val(env); 4348 } 4349 4350 static const ARMCPRegInfo lpae_cp_reginfo[] = { 4351 /* NOP AMAIR0/1 */ 4352 { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH, 4353 .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0, 4354 .access = PL1_RW, .accessfn = access_tvm_trvm, 4355 .type = ARM_CP_CONST, .resetvalue = 0 }, 4356 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */ 4357 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1, 4358 .access = PL1_RW, .accessfn = access_tvm_trvm, 4359 .type = ARM_CP_CONST, .resetvalue = 0 }, 4360 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0, 4361 .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0, 4362 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s), 4363 offsetof(CPUARMState, cp15.par_ns)} }, 4364 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0, 4365 .access = PL1_RW, .accessfn = access_tvm_trvm, 4366 .type = ARM_CP_64BIT | ARM_CP_ALIAS, 4367 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 4368 offsetof(CPUARMState, cp15.ttbr0_ns) }, 4369 .writefn = vmsa_ttbr_write, }, 4370 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1, 4371 .access = PL1_RW, .accessfn = access_tvm_trvm, 4372 .type = ARM_CP_64BIT | ARM_CP_ALIAS, 4373 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 4374 offsetof(CPUARMState, cp15.ttbr1_ns) }, 4375 .writefn = vmsa_ttbr_write, }, 4376 REGINFO_SENTINEL 4377 }; 4378 4379 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri) 4380 { 4381 return vfp_get_fpcr(env); 4382 } 4383 4384 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4385 uint64_t value) 4386 { 4387 vfp_set_fpcr(env, value); 4388 } 4389 4390 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri) 4391 { 4392 return vfp_get_fpsr(env); 4393 } 4394 4395 static void aa64_fpsr_write(CPUARMState *env, const 
ARMCPRegInfo *ri, 4396 uint64_t value) 4397 { 4398 vfp_set_fpsr(env, value); 4399 } 4400 4401 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri, 4402 bool isread) 4403 { 4404 if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) { 4405 return CP_ACCESS_TRAP; 4406 } 4407 return CP_ACCESS_OK; 4408 } 4409 4410 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri, 4411 uint64_t value) 4412 { 4413 env->daif = value & PSTATE_DAIF; 4414 } 4415 4416 static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri) 4417 { 4418 return env->pstate & PSTATE_PAN; 4419 } 4420 4421 static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri, 4422 uint64_t value) 4423 { 4424 env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN); 4425 } 4426 4427 static const ARMCPRegInfo pan_reginfo = { 4428 .name = "PAN", .state = ARM_CP_STATE_AA64, 4429 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3, 4430 .type = ARM_CP_NO_RAW, .access = PL1_RW, 4431 .readfn = aa64_pan_read, .writefn = aa64_pan_write 4432 }; 4433 4434 static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri) 4435 { 4436 return env->pstate & PSTATE_UAO; 4437 } 4438 4439 static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri, 4440 uint64_t value) 4441 { 4442 env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO); 4443 } 4444 4445 static const ARMCPRegInfo uao_reginfo = { 4446 .name = "UAO", .state = ARM_CP_STATE_AA64, 4447 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4, 4448 .type = ARM_CP_NO_RAW, .access = PL1_RW, 4449 .readfn = aa64_uao_read, .writefn = aa64_uao_write 4450 }; 4451 4452 static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri) 4453 { 4454 return env->pstate & PSTATE_DIT; 4455 } 4456 4457 static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri, 4458 uint64_t value) 4459 { 4460 env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT); 4461 } 4462 4463 static const ARMCPRegInfo dit_reginfo = { 4464 .name = "DIT", .state = ARM_CP_STATE_AA64, 4465 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5, 4466 .type = ARM_CP_NO_RAW, .access = PL0_RW, 4467 .readfn = aa64_dit_read, .writefn = aa64_dit_write 4468 }; 4469 4470 static uint64_t aa64_ssbs_read(CPUARMState *env, const ARMCPRegInfo *ri) 4471 { 4472 return env->pstate & PSTATE_SSBS; 4473 } 4474 4475 static void aa64_ssbs_write(CPUARMState *env, const ARMCPRegInfo *ri, 4476 uint64_t value) 4477 { 4478 env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS); 4479 } 4480 4481 static const ARMCPRegInfo ssbs_reginfo = { 4482 .name = "SSBS", .state = ARM_CP_STATE_AA64, 4483 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 6, 4484 .type = ARM_CP_NO_RAW, .access = PL0_RW, 4485 .readfn = aa64_ssbs_read, .writefn = aa64_ssbs_write 4486 }; 4487 4488 static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env, 4489 const ARMCPRegInfo *ri, 4490 bool isread) 4491 { 4492 /* Cache invalidate/clean to Point of Coherency or Persistence... */ 4493 switch (arm_current_el(env)) { 4494 case 0: 4495 /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */ 4496 if (!(arm_sctlr(env, 0) & SCTLR_UCI)) { 4497 return CP_ACCESS_TRAP; 4498 } 4499 /* fall through */ 4500 case 1: 4501 /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set. 
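* (Because of the fall-through above, an EL0 access that passed the UCI
* check is also subject to this trap.)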
*/ 4502 if (arm_hcr_el2_eff(env) & HCR_TPCP) { 4503 return CP_ACCESS_TRAP_EL2; 4504 } 4505 break; 4506 } 4507 return CP_ACCESS_OK; 4508 } 4509 4510 static CPAccessResult aa64_cacheop_pou_access(CPUARMState *env, 4511 const ARMCPRegInfo *ri, 4512 bool isread) 4513 { 4514 /* Cache invalidate/clean to Point of Unification... */ 4515 switch (arm_current_el(env)) { 4516 case 0: 4517 /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */ 4518 if (!(arm_sctlr(env, 0) & SCTLR_UCI)) { 4519 return CP_ACCESS_TRAP; 4520 } 4521 /* fall through */ 4522 case 1: 4523 /* ... EL1 must trap to EL2 if HCR_EL2.TPU is set. */ 4524 if (arm_hcr_el2_eff(env) & HCR_TPU) { 4525 return CP_ACCESS_TRAP_EL2; 4526 } 4527 break; 4528 } 4529 return CP_ACCESS_OK; 4530 } 4531 4532 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions 4533 * Page D4-1736 (DDI0487A.b) 4534 */ 4535 4536 static int vae1_tlbmask(CPUARMState *env) 4537 { 4538 uint64_t hcr = arm_hcr_el2_eff(env); 4539 uint16_t mask; 4540 4541 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 4542 mask = ARMMMUIdxBit_E20_2 | 4543 ARMMMUIdxBit_E20_2_PAN | 4544 ARMMMUIdxBit_E20_0; 4545 } else { 4546 mask = ARMMMUIdxBit_E10_1 | 4547 ARMMMUIdxBit_E10_1_PAN | 4548 ARMMMUIdxBit_E10_0; 4549 } 4550 4551 if (arm_is_secure_below_el3(env)) { 4552 mask >>= ARM_MMU_IDX_A_NS; 4553 } 4554 4555 return mask; 4556 } 4557 4558 /* Return 56 if TBI is enabled, 64 otherwise. */ 4559 static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx, 4560 uint64_t addr) 4561 { 4562 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; 4563 int tbi = aa64_va_parameter_tbi(tcr, mmu_idx); 4564 int select = extract64(addr, 55, 1); 4565 4566 return (tbi >> select) & 1 ? 56 : 64; 4567 } 4568 4569 static int vae1_tlbbits(CPUARMState *env, uint64_t addr) 4570 { 4571 uint64_t hcr = arm_hcr_el2_eff(env); 4572 ARMMMUIdx mmu_idx; 4573 4574 /* Only the regime of the mmu_idx below is significant. */ 4575 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 4576 mmu_idx = ARMMMUIdx_E20_0; 4577 } else { 4578 mmu_idx = ARMMMUIdx_E10_0; 4579 } 4580 4581 if (arm_is_secure_below_el3(env)) { 4582 mmu_idx &= ~ARM_MMU_IDX_A_NS; 4583 } 4584 4585 return tlbbits_for_regime(env, mmu_idx, addr); 4586 } 4587 4588 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4589 uint64_t value) 4590 { 4591 CPUState *cs = env_cpu(env); 4592 int mask = vae1_tlbmask(env); 4593 4594 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); 4595 } 4596 4597 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 4598 uint64_t value) 4599 { 4600 CPUState *cs = env_cpu(env); 4601 int mask = vae1_tlbmask(env); 4602 4603 if (tlb_force_broadcast(env)) { 4604 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); 4605 } else { 4606 tlb_flush_by_mmuidx(cs, mask); 4607 } 4608 } 4609 4610 static int alle1_tlbmask(CPUARMState *env) 4611 { 4612 /* 4613 * Note that the 'ALL' scope must invalidate both stage 1 and 4614 * stage 2 translations, whereas most other scopes only invalidate 4615 * stage 1 translations. 
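* In QEMU the entries cached under the E10_* (and SE10_*) indexes are the
* result of the full stage 1 + stage 2 translation, so flushing those
* indexes discards the stage 2 information as well.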
4616 */ 4617 if (arm_is_secure_below_el3(env)) { 4618 return ARMMMUIdxBit_SE10_1 | 4619 ARMMMUIdxBit_SE10_1_PAN | 4620 ARMMMUIdxBit_SE10_0; 4621 } else { 4622 return ARMMMUIdxBit_E10_1 | 4623 ARMMMUIdxBit_E10_1_PAN | 4624 ARMMMUIdxBit_E10_0; 4625 } 4626 } 4627 4628 static int e2_tlbmask(CPUARMState *env) 4629 { 4630 if (arm_is_secure_below_el3(env)) { 4631 return ARMMMUIdxBit_SE20_0 | 4632 ARMMMUIdxBit_SE20_2 | 4633 ARMMMUIdxBit_SE20_2_PAN | 4634 ARMMMUIdxBit_SE2; 4635 } else { 4636 return ARMMMUIdxBit_E20_0 | 4637 ARMMMUIdxBit_E20_2 | 4638 ARMMMUIdxBit_E20_2_PAN | 4639 ARMMMUIdxBit_E2; 4640 } 4641 } 4642 4643 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 4644 uint64_t value) 4645 { 4646 CPUState *cs = env_cpu(env); 4647 int mask = alle1_tlbmask(env); 4648 4649 tlb_flush_by_mmuidx(cs, mask); 4650 } 4651 4652 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri, 4653 uint64_t value) 4654 { 4655 CPUState *cs = env_cpu(env); 4656 int mask = e2_tlbmask(env); 4657 4658 tlb_flush_by_mmuidx(cs, mask); 4659 } 4660 4661 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, 4662 uint64_t value) 4663 { 4664 ARMCPU *cpu = env_archcpu(env); 4665 CPUState *cs = CPU(cpu); 4666 4667 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3); 4668 } 4669 4670 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4671 uint64_t value) 4672 { 4673 CPUState *cs = env_cpu(env); 4674 int mask = alle1_tlbmask(env); 4675 4676 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); 4677 } 4678 4679 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4680 uint64_t value) 4681 { 4682 CPUState *cs = env_cpu(env); 4683 int mask = e2_tlbmask(env); 4684 4685 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); 4686 } 4687 4688 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4689 uint64_t value) 4690 { 4691 CPUState *cs = env_cpu(env); 4692 4693 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3); 4694 } 4695 4696 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, 4697 uint64_t value) 4698 { 4699 /* Invalidate by VA, EL2 4700 * Currently handles both VAE2 and VALE2, since we don't support 4701 * flush-last-level-only. 4702 */ 4703 CPUState *cs = env_cpu(env); 4704 int mask = e2_tlbmask(env); 4705 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4706 4707 tlb_flush_page_by_mmuidx(cs, pageaddr, mask); 4708 } 4709 4710 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri, 4711 uint64_t value) 4712 { 4713 /* Invalidate by VA, EL3 4714 * Currently handles both VAE3 and VALE3, since we don't support 4715 * flush-last-level-only. 4716 */ 4717 ARMCPU *cpu = env_archcpu(env); 4718 CPUState *cs = CPU(cpu); 4719 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4720 4721 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3); 4722 } 4723 4724 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4725 uint64_t value) 4726 { 4727 CPUState *cs = env_cpu(env); 4728 int mask = vae1_tlbmask(env); 4729 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4730 int bits = vae1_tlbbits(env, pageaddr); 4731 4732 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits); 4733 } 4734 4735 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, 4736 uint64_t value) 4737 { 4738 /* Invalidate by VA, EL1&0 (AArch64 version). 
4739 * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1, 4740 * since we don't support flush-for-specific-ASID-only or 4741 * flush-last-level-only. 4742 */ 4743 CPUState *cs = env_cpu(env); 4744 int mask = vae1_tlbmask(env); 4745 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4746 int bits = vae1_tlbbits(env, pageaddr); 4747 4748 if (tlb_force_broadcast(env)) { 4749 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits); 4750 } else { 4751 tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits); 4752 } 4753 } 4754 4755 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4756 uint64_t value) 4757 { 4758 CPUState *cs = env_cpu(env); 4759 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4760 bool secure = arm_is_secure_below_el3(env); 4761 int mask = secure ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2; 4762 int bits = tlbbits_for_regime(env, secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2, 4763 pageaddr); 4764 4765 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits); 4766 } 4767 4768 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4769 uint64_t value) 4770 { 4771 CPUState *cs = env_cpu(env); 4772 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4773 int bits = tlbbits_for_regime(env, ARMMMUIdx_SE3, pageaddr); 4774 4775 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, 4776 ARMMMUIdxBit_SE3, bits); 4777 } 4778 4779 #ifdef TARGET_AARCH64 4780 static uint64_t tlbi_aa64_range_get_length(CPUARMState *env, 4781 uint64_t value) 4782 { 4783 unsigned int page_shift; 4784 unsigned int page_size_granule; 4785 uint64_t num; 4786 uint64_t scale; 4787 uint64_t exponent; 4788 uint64_t length; 4789 4790 num = extract64(value, 39, 4); 4791 scale = extract64(value, 44, 2); 4792 page_size_granule = extract64(value, 46, 2); 4793 4794 page_shift = page_size_granule * 2 + 12; 4795 4796 if (page_size_granule == 0) { 4797 qemu_log_mask(LOG_GUEST_ERROR, "Invalid page size granule %d\n", 4798 page_size_granule); 4799 return 0; 4800 } 4801 4802 exponent = (5 * scale) + 1; 4803 length = (num + 1) << (exponent + page_shift); 4804 4805 return length; 4806 } 4807 4808 static uint64_t tlbi_aa64_range_get_base(CPUARMState *env, uint64_t value, 4809 bool two_ranges) 4810 { 4811 /* TODO: ARMv8.7 FEAT_LPA2 */ 4812 uint64_t pageaddr; 4813 4814 if (two_ranges) { 4815 pageaddr = sextract64(value, 0, 37) << TARGET_PAGE_BITS; 4816 } else { 4817 pageaddr = extract64(value, 0, 37) << TARGET_PAGE_BITS; 4818 } 4819 4820 return pageaddr; 4821 } 4822 4823 static void do_rvae_write(CPUARMState *env, uint64_t value, 4824 int idxmap, bool synced) 4825 { 4826 ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap); 4827 bool two_ranges = regime_has_2_ranges(one_idx); 4828 uint64_t baseaddr, length; 4829 int bits; 4830 4831 baseaddr = tlbi_aa64_range_get_base(env, value, two_ranges); 4832 length = tlbi_aa64_range_get_length(env, value); 4833 bits = tlbbits_for_regime(env, one_idx, baseaddr); 4834 4835 if (synced) { 4836 tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env), 4837 baseaddr, 4838 length, 4839 idxmap, 4840 bits); 4841 } else { 4842 tlb_flush_range_by_mmuidx(env_cpu(env), baseaddr, 4843 length, idxmap, bits); 4844 } 4845 } 4846 4847 static void tlbi_aa64_rvae1_write(CPUARMState *env, 4848 const ARMCPRegInfo *ri, 4849 uint64_t value) 4850 { 4851 /* 4852 * Invalidate by VA range, EL1&0. 
4853 * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1, 4854 * since we don't support flush-for-specific-ASID-only or 4855 * flush-last-level-only. 4856 */ 4857 4858 do_rvae_write(env, value, vae1_tlbmask(env), 4859 tlb_force_broadcast(env)); 4860 } 4861 4862 static void tlbi_aa64_rvae1is_write(CPUARMState *env, 4863 const ARMCPRegInfo *ri, 4864 uint64_t value) 4865 { 4866 /* 4867 * Invalidate by VA range, Inner/Outer Shareable EL1&0. 4868 * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS, 4869 * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support 4870 * flush-for-specific-ASID-only, flush-last-level-only or inner/outer 4871 * shareable specific flushes. 4872 */ 4873 4874 do_rvae_write(env, value, vae1_tlbmask(env), true); 4875 } 4876 4877 static int vae2_tlbmask(CPUARMState *env) 4878 { 4879 return (arm_is_secure_below_el3(env) 4880 ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2); 4881 } 4882 4883 static void tlbi_aa64_rvae2_write(CPUARMState *env, 4884 const ARMCPRegInfo *ri, 4885 uint64_t value) 4886 { 4887 /* 4888 * Invalidate by VA range, EL2. 4889 * Currently handles all of RVAE2 and RVALE2, 4890 * since we don't support flush-for-specific-ASID-only or 4891 * flush-last-level-only. 4892 */ 4893 4894 do_rvae_write(env, value, vae2_tlbmask(env), 4895 tlb_force_broadcast(env)); 4896 4897 4898 } 4899 4900 static void tlbi_aa64_rvae2is_write(CPUARMState *env, 4901 const ARMCPRegInfo *ri, 4902 uint64_t value) 4903 { 4904 /* 4905 * Invalidate by VA range, Inner/Outer Shareable, EL2. 4906 * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS, 4907 * since we don't support flush-for-specific-ASID-only, 4908 * flush-last-level-only or inner/outer shareable specific flushes. 4909 */ 4910 4911 do_rvae_write(env, value, vae2_tlbmask(env), true); 4912 4913 } 4914 4915 static void tlbi_aa64_rvae3_write(CPUARMState *env, 4916 const ARMCPRegInfo *ri, 4917 uint64_t value) 4918 { 4919 /* 4920 * Invalidate by VA range, EL3. 4921 * Currently handles all of RVAE3 and RVALE3, 4922 * since we don't support flush-for-specific-ASID-only or 4923 * flush-last-level-only. 4924 */ 4925 4926 do_rvae_write(env, value, ARMMMUIdxBit_SE3, 4927 tlb_force_broadcast(env)); 4928 } 4929 4930 static void tlbi_aa64_rvae3is_write(CPUARMState *env, 4931 const ARMCPRegInfo *ri, 4932 uint64_t value) 4933 { 4934 /* 4935 * Invalidate by VA range, EL3, Inner/Outer Shareable. 4936 * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS, 4937 * since we don't support flush-for-specific-ASID-only, 4938 * flush-last-level-only or inner/outer specific flushes. 
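* (The IS and OS forms behave identically here: the invalidation is
* always broadcast to all CPUs via do_rvae_write(..., true).)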
4939 */ 4940 4941 do_rvae_write(env, value, ARMMMUIdxBit_SE3, true); 4942 } 4943 #endif 4944 4945 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri, 4946 bool isread) 4947 { 4948 int cur_el = arm_current_el(env); 4949 4950 if (cur_el < 2) { 4951 uint64_t hcr = arm_hcr_el2_eff(env); 4952 4953 if (cur_el == 0) { 4954 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 4955 if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) { 4956 return CP_ACCESS_TRAP_EL2; 4957 } 4958 } else { 4959 if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) { 4960 return CP_ACCESS_TRAP; 4961 } 4962 if (hcr & HCR_TDZ) { 4963 return CP_ACCESS_TRAP_EL2; 4964 } 4965 } 4966 } else if (hcr & HCR_TDZ) { 4967 return CP_ACCESS_TRAP_EL2; 4968 } 4969 } 4970 return CP_ACCESS_OK; 4971 } 4972 4973 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri) 4974 { 4975 ARMCPU *cpu = env_archcpu(env); 4976 int dzp_bit = 1 << 4; 4977 4978 /* DZP indicates whether DC ZVA access is allowed */ 4979 if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) { 4980 dzp_bit = 0; 4981 } 4982 return cpu->dcz_blocksize | dzp_bit; 4983 } 4984 4985 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, 4986 bool isread) 4987 { 4988 if (!(env->pstate & PSTATE_SP)) { 4989 /* Access to SP_EL0 is undefined if it's being used as 4990 * the stack pointer. 4991 */ 4992 return CP_ACCESS_TRAP_UNCATEGORIZED; 4993 } 4994 return CP_ACCESS_OK; 4995 } 4996 4997 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri) 4998 { 4999 return env->pstate & PSTATE_SP; 5000 } 5001 5002 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val) 5003 { 5004 update_spsel(env, val); 5005 } 5006 5007 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, 5008 uint64_t value) 5009 { 5010 ARMCPU *cpu = env_archcpu(env); 5011 5012 if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) { 5013 /* M bit is RAZ/WI for PMSA with no MPU implemented */ 5014 value &= ~SCTLR_M; 5015 } 5016 5017 /* ??? Lots of these bits are not implemented. */ 5018 5019 if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) { 5020 if (ri->opc1 == 6) { /* SCTLR_EL3 */ 5021 value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA); 5022 } else { 5023 value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF | 5024 SCTLR_ATA0 | SCTLR_ATA); 5025 } 5026 } 5027 5028 if (raw_read(env, ri) == value) { 5029 /* Skip the TLB flush if nothing actually changed; Linux likes 5030 * to do a lot of pointless SCTLR writes. 5031 */ 5032 return; 5033 } 5034 5035 raw_write(env, ri, value); 5036 5037 /* This may enable/disable the MMU, so do a TLB flush. */ 5038 tlb_flush(CPU(cpu)); 5039 5040 if (ri->type & ARM_CP_SUPPRESS_TB_END) { 5041 /* 5042 * Normally we would always end the TB on an SCTLR write; see the 5043 * comment in ARMCPRegInfo sctlr initialization below for why Xscale 5044 * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild 5045 * of hflags from the translator, so do it here. 
5046 */ 5047 arm_rebuild_hflags(env); 5048 } 5049 } 5050 5051 static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri, 5052 bool isread) 5053 { 5054 if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) { 5055 return CP_ACCESS_TRAP_FP_EL2; 5056 } 5057 if (env->cp15.cptr_el[3] & CPTR_TFP) { 5058 return CP_ACCESS_TRAP_FP_EL3; 5059 } 5060 return CP_ACCESS_OK; 5061 } 5062 5063 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 5064 uint64_t value) 5065 { 5066 env->cp15.mdcr_el3 = value & SDCR_VALID_MASK; 5067 } 5068 5069 static const ARMCPRegInfo v8_cp_reginfo[] = { 5070 /* Minimal set of EL0-visible registers. This will need to be expanded 5071 * significantly for system emulation of AArch64 CPUs. 5072 */ 5073 { .name = "NZCV", .state = ARM_CP_STATE_AA64, 5074 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2, 5075 .access = PL0_RW, .type = ARM_CP_NZCV }, 5076 { .name = "DAIF", .state = ARM_CP_STATE_AA64, 5077 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2, 5078 .type = ARM_CP_NO_RAW, 5079 .access = PL0_RW, .accessfn = aa64_daif_access, 5080 .fieldoffset = offsetof(CPUARMState, daif), 5081 .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore }, 5082 { .name = "FPCR", .state = ARM_CP_STATE_AA64, 5083 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4, 5084 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, 5085 .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write }, 5086 { .name = "FPSR", .state = ARM_CP_STATE_AA64, 5087 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4, 5088 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, 5089 .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write }, 5090 { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64, 5091 .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0, 5092 .access = PL0_R, .type = ARM_CP_NO_RAW, 5093 .readfn = aa64_dczid_read }, 5094 { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64, 5095 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1, 5096 .access = PL0_W, .type = ARM_CP_DC_ZVA, 5097 #ifndef CONFIG_USER_ONLY 5098 /* Avoid overhead of an access check that always passes in user-mode */ 5099 .accessfn = aa64_zva_access, 5100 #endif 5101 }, 5102 { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64, 5103 .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2, 5104 .access = PL1_R, .type = ARM_CP_CURRENTEL }, 5105 /* Cache ops: all NOPs since we don't emulate caches */ 5106 { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64, 5107 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 5108 .access = PL1_W, .type = ARM_CP_NOP, 5109 .accessfn = aa64_cacheop_pou_access }, 5110 { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64, 5111 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 5112 .access = PL1_W, .type = ARM_CP_NOP, 5113 .accessfn = aa64_cacheop_pou_access }, 5114 { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64, 5115 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1, 5116 .access = PL0_W, .type = ARM_CP_NOP, 5117 .accessfn = aa64_cacheop_pou_access }, 5118 { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64, 5119 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 5120 .access = PL1_W, .accessfn = aa64_cacheop_poc_access, 5121 .type = ARM_CP_NOP }, 5122 { .name = "DC_ISW", .state = ARM_CP_STATE_AA64, 5123 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 5124 .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP }, 5125 { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64, 5126 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1, 5127 .access = PL0_W, .type = 
ARM_CP_NOP, 5128 .accessfn = aa64_cacheop_poc_access }, 5129 { .name = "DC_CSW", .state = ARM_CP_STATE_AA64, 5130 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, 5131 .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP }, 5132 { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64, 5133 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1, 5134 .access = PL0_W, .type = ARM_CP_NOP, 5135 .accessfn = aa64_cacheop_pou_access }, 5136 { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64, 5137 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1, 5138 .access = PL0_W, .type = ARM_CP_NOP, 5139 .accessfn = aa64_cacheop_poc_access }, 5140 { .name = "DC_CISW", .state = ARM_CP_STATE_AA64, 5141 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 5142 .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP }, 5143 /* TLBI operations */ 5144 { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64, 5145 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, 5146 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 5147 .writefn = tlbi_aa64_vmalle1is_write }, 5148 { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64, 5149 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, 5150 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 5151 .writefn = tlbi_aa64_vae1is_write }, 5152 { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64, 5153 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, 5154 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 5155 .writefn = tlbi_aa64_vmalle1is_write }, 5156 { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64, 5157 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, 5158 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 5159 .writefn = tlbi_aa64_vae1is_write }, 5160 { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64, 5161 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, 5162 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 5163 .writefn = tlbi_aa64_vae1is_write }, 5164 { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64, 5165 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, 5166 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 5167 .writefn = tlbi_aa64_vae1is_write }, 5168 { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64, 5169 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, 5170 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 5171 .writefn = tlbi_aa64_vmalle1_write }, 5172 { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64, 5173 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, 5174 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 5175 .writefn = tlbi_aa64_vae1_write }, 5176 { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64, 5177 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, 5178 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 5179 .writefn = tlbi_aa64_vmalle1_write }, 5180 { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64, 5181 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, 5182 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 5183 .writefn = tlbi_aa64_vae1_write }, 5184 { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64, 5185 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, 5186 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 5187 .writefn = tlbi_aa64_vae1_write }, 5188 { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64, 5189 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, 5190 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 5191 
.writefn = tlbi_aa64_vae1_write }, 5192 { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64, 5193 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, 5194 .access = PL2_W, .type = ARM_CP_NOP }, 5195 { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64, 5196 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, 5197 .access = PL2_W, .type = ARM_CP_NOP }, 5198 { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64, 5199 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, 5200 .access = PL2_W, .type = ARM_CP_NO_RAW, 5201 .writefn = tlbi_aa64_alle1is_write }, 5202 { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64, 5203 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6, 5204 .access = PL2_W, .type = ARM_CP_NO_RAW, 5205 .writefn = tlbi_aa64_alle1is_write }, 5206 { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64, 5207 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, 5208 .access = PL2_W, .type = ARM_CP_NOP }, 5209 { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64, 5210 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, 5211 .access = PL2_W, .type = ARM_CP_NOP }, 5212 { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64, 5213 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, 5214 .access = PL2_W, .type = ARM_CP_NO_RAW, 5215 .writefn = tlbi_aa64_alle1_write }, 5216 { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64, 5217 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6, 5218 .access = PL2_W, .type = ARM_CP_NO_RAW, 5219 .writefn = tlbi_aa64_alle1is_write }, 5220 #ifndef CONFIG_USER_ONLY 5221 /* 64 bit address translation operations */ 5222 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, 5223 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0, 5224 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 5225 .writefn = ats_write64 }, 5226 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, 5227 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1, 5228 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 5229 .writefn = ats_write64 }, 5230 { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64, 5231 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2, 5232 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 5233 .writefn = ats_write64 }, 5234 { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64, 5235 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3, 5236 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 5237 .writefn = ats_write64 }, 5238 { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64, 5239 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4, 5240 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 5241 .writefn = ats_write64 }, 5242 { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64, 5243 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5, 5244 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 5245 .writefn = ats_write64 }, 5246 { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64, 5247 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6, 5248 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 5249 .writefn = ats_write64 }, 5250 { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64, 5251 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7, 5252 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 5253 .writefn = ats_write64 }, 5254 /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */ 5255 { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64, 5256 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0, 5257 .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 5258 .writefn = ats_write64 }, 
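    /* Like the AT ops above, AT S1E3R/W report their result in PAR_EL1. */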
5259 { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64, 5260 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1, 5261 .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 5262 .writefn = ats_write64 }, 5263 { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64, 5264 .type = ARM_CP_ALIAS, 5265 .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0, 5266 .access = PL1_RW, .resetvalue = 0, 5267 .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]), 5268 .writefn = par_write }, 5269 #endif 5270 /* TLB invalidate last level of translation table walk */ 5271 { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, 5272 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 5273 .writefn = tlbimva_is_write }, 5274 { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, 5275 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 5276 .writefn = tlbimvaa_is_write }, 5277 { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, 5278 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 5279 .writefn = tlbimva_write }, 5280 { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, 5281 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 5282 .writefn = tlbimvaa_write }, 5283 { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, 5284 .type = ARM_CP_NO_RAW, .access = PL2_W, 5285 .writefn = tlbimva_hyp_write }, 5286 { .name = "TLBIMVALHIS", 5287 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, 5288 .type = ARM_CP_NO_RAW, .access = PL2_W, 5289 .writefn = tlbimva_hyp_is_write }, 5290 { .name = "TLBIIPAS2", 5291 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, 5292 .type = ARM_CP_NOP, .access = PL2_W }, 5293 { .name = "TLBIIPAS2IS", 5294 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, 5295 .type = ARM_CP_NOP, .access = PL2_W }, 5296 { .name = "TLBIIPAS2L", 5297 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, 5298 .type = ARM_CP_NOP, .access = PL2_W }, 5299 { .name = "TLBIIPAS2LIS", 5300 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, 5301 .type = ARM_CP_NOP, .access = PL2_W }, 5302 /* 32 bit cache operations */ 5303 { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 5304 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, 5305 { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6, 5306 .type = ARM_CP_NOP, .access = PL1_W }, 5307 { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 5308 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, 5309 { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1, 5310 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, 5311 { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6, 5312 .type = ARM_CP_NOP, .access = PL1_W }, 5313 { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7, 5314 .type = ARM_CP_NOP, .access = PL1_W }, 5315 { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 5316 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access }, 5317 { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 5318 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 5319 { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1, 5320 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access }, 5321 { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 
10, .opc2 = 2, 5322 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 5323 { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1, 5324 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, 5325 { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1, 5326 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access }, 5327 { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 5328 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 5329 /* MMU Domain access control / MPU write buffer control */ 5330 { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0, 5331 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, 5332 .writefn = dacr_write, .raw_writefn = raw_write, 5333 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), 5334 offsetoflow32(CPUARMState, cp15.dacr_ns) } }, 5335 { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64, 5336 .type = ARM_CP_ALIAS, 5337 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1, 5338 .access = PL1_RW, 5339 .fieldoffset = offsetof(CPUARMState, elr_el[1]) }, 5340 { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64, 5341 .type = ARM_CP_ALIAS, 5342 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0, 5343 .access = PL1_RW, 5344 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) }, 5345 /* We rely on the access checks not allowing the guest to write to the 5346 * state field when SPSel indicates that it's being used as the stack 5347 * pointer. 5348 */ 5349 { .name = "SP_EL0", .state = ARM_CP_STATE_AA64, 5350 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0, 5351 .access = PL1_RW, .accessfn = sp_el0_access, 5352 .type = ARM_CP_ALIAS, 5353 .fieldoffset = offsetof(CPUARMState, sp_el[0]) }, 5354 { .name = "SP_EL1", .state = ARM_CP_STATE_AA64, 5355 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0, 5356 .access = PL2_RW, .type = ARM_CP_ALIAS, 5357 .fieldoffset = offsetof(CPUARMState, sp_el[1]) }, 5358 { .name = "SPSel", .state = ARM_CP_STATE_AA64, 5359 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0, 5360 .type = ARM_CP_NO_RAW, 5361 .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write }, 5362 { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64, 5363 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0, 5364 .type = ARM_CP_ALIAS, 5365 .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]), 5366 .access = PL2_RW, .accessfn = fpexc32_access }, 5367 { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64, 5368 .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0, 5369 .access = PL2_RW, .resetvalue = 0, 5370 .writefn = dacr_write, .raw_writefn = raw_write, 5371 .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) }, 5372 { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64, 5373 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1, 5374 .access = PL2_RW, .resetvalue = 0, 5375 .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) }, 5376 { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64, 5377 .type = ARM_CP_ALIAS, 5378 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0, 5379 .access = PL2_RW, 5380 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) }, 5381 { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64, 5382 .type = ARM_CP_ALIAS, 5383 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1, 5384 .access = PL2_RW, 5385 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) }, 5386 { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64, 5387 .type = ARM_CP_ALIAS, 5388 .opc0 = 3, .opc1 = 4, .crn = 4, 
.crm = 3, .opc2 = 2, 5389 .access = PL2_RW, 5390 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) }, 5391 { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64, 5392 .type = ARM_CP_ALIAS, 5393 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3, 5394 .access = PL2_RW, 5395 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) }, 5396 { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64, 5397 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1, 5398 .resetvalue = 0, 5399 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) }, 5400 { .name = "SDCR", .type = ARM_CP_ALIAS, 5401 .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1, 5402 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 5403 .writefn = sdcr_write, 5404 .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) }, 5405 REGINFO_SENTINEL 5406 }; 5407 5408 /* Used to describe the behaviour of EL2 regs when EL2 does not exist. */ 5409 static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = { 5410 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, 5411 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, 5412 .access = PL2_RW, 5413 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore }, 5414 { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH, 5415 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 5416 .access = PL2_RW, 5417 .type = ARM_CP_CONST, .resetvalue = 0 }, 5418 { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH, 5419 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7, 5420 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5421 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, 5422 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, 5423 .access = PL2_RW, 5424 .type = ARM_CP_CONST, .resetvalue = 0 }, 5425 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, 5426 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, 5427 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5428 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, 5429 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, 5430 .access = PL2_RW, .type = ARM_CP_CONST, 5431 .resetvalue = 0 }, 5432 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, 5433 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, 5434 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5435 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, 5436 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, 5437 .access = PL2_RW, .type = ARM_CP_CONST, 5438 .resetvalue = 0 }, 5439 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, 5440 .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, 5441 .access = PL2_RW, .type = ARM_CP_CONST, 5442 .resetvalue = 0 }, 5443 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, 5444 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, 5445 .access = PL2_RW, .type = ARM_CP_CONST, 5446 .resetvalue = 0 }, 5447 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, 5448 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, 5449 .access = PL2_RW, .type = ARM_CP_CONST, 5450 .resetvalue = 0 }, 5451 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, 5452 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, 5453 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5454 { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH, 5455 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 5456 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5457 .type = ARM_CP_CONST, .resetvalue = 0 }, 5458 { .name = "VTTBR", .state = ARM_CP_STATE_AA32, 5459 .cp = 15, .opc1 = 6, .crm = 2, 5460 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5461 .type = ARM_CP_CONST | 
ARM_CP_64BIT, .resetvalue = 0 }, 5462 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, 5463 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, 5464 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5465 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, 5466 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, 5467 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5468 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, 5469 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, 5470 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5471 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, 5472 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 5473 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5474 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, 5475 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 5476 .resetvalue = 0 }, 5477 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, 5478 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, 5479 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5480 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, 5481 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, 5482 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5483 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, 5484 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 5485 .resetvalue = 0 }, 5486 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, 5487 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, 5488 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5489 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, 5490 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 5491 .resetvalue = 0 }, 5492 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 5493 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, 5494 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5495 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, 5496 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, 5497 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5498 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, 5499 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, 5500 .access = PL2_RW, .accessfn = access_tda, 5501 .type = ARM_CP_CONST, .resetvalue = 0 }, 5502 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH, 5503 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 5504 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5505 .type = ARM_CP_CONST, .resetvalue = 0 }, 5506 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, 5507 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, 5508 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5509 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, 5510 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, 5511 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5512 { .name = "HIFAR", .state = ARM_CP_STATE_AA32, 5513 .type = ARM_CP_CONST, 5514 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, 5515 .access = PL2_RW, .resetvalue = 0 }, 5516 REGINFO_SENTINEL 5517 }; 5518 5519 /* Ditto, but for registers which exist in ARMv8 but not v7 */ 5520 static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = { 5521 { .name = "HCR2", .state = ARM_CP_STATE_AA32, 5522 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, 5523 .access = PL2_RW, 5524 .type = ARM_CP_CONST, .resetvalue = 0 }, 5525 REGINFO_SENTINEL 5526 }; 5527 5528 static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) 5529 { 5530 ARMCPU *cpu = env_archcpu(env); 5531 5532 if 
(arm_feature(env, ARM_FEATURE_V8)) { 5533 valid_mask |= MAKE_64BIT_MASK(0, 34); /* ARMv8.0 */ 5534 } else { 5535 valid_mask |= MAKE_64BIT_MASK(0, 28); /* ARMv7VE */ 5536 } 5537 5538 if (arm_feature(env, ARM_FEATURE_EL3)) { 5539 valid_mask &= ~HCR_HCD; 5540 } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { 5541 /* Architecturally HCR.TSC is RES0 if EL3 is not implemented. 5542 * However, if we're using the SMC PSCI conduit then QEMU is 5543 * effectively acting like EL3 firmware and so the guest at 5544 * EL2 should retain the ability to prevent EL1 from being 5545 * able to make SMC calls into the ersatz firmware, so in 5546 * that case HCR.TSC should be read/write. 5547 */ 5548 valid_mask &= ~HCR_TSC; 5549 } 5550 5551 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 5552 if (cpu_isar_feature(aa64_vh, cpu)) { 5553 valid_mask |= HCR_E2H; 5554 } 5555 if (cpu_isar_feature(aa64_lor, cpu)) { 5556 valid_mask |= HCR_TLOR; 5557 } 5558 if (cpu_isar_feature(aa64_pauth, cpu)) { 5559 valid_mask |= HCR_API | HCR_APK; 5560 } 5561 if (cpu_isar_feature(aa64_mte, cpu)) { 5562 valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5; 5563 } 5564 } 5565 5566 /* Clear RES0 bits. */ 5567 value &= valid_mask; 5568 5569 /* 5570 * These bits change the MMU setup: 5571 * HCR_VM enables stage 2 translation 5572 * HCR_PTW forbids certain page-table setups 5573 * HCR_DC disables stage1 and enables stage2 translation 5574 * HCR_DCT enables tagging on (disabled) stage1 translation 5575 */ 5576 if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT)) { 5577 tlb_flush(CPU(cpu)); 5578 } 5579 env->cp15.hcr_el2 = value; 5580 5581 /* 5582 * Updates to VI and VF require us to update the status of 5583 * virtual interrupts, which are the logical OR of these bits 5584 * and the state of the input lines from the GIC. (This requires 5585 * that we have the iothread lock, which is done by marking the 5586 * reginfo structs as ARM_CP_IO.) 5587 * Note that if a write to HCR pends a VIRQ or VFIQ it is never 5588 * possible for it to be taken immediately, because VIRQ and 5589 * VFIQ are masked unless running at EL0 or EL1, and HCR 5590 * can only be written at EL2. 5591 */ 5592 g_assert(qemu_mutex_iothread_locked()); 5593 arm_cpu_update_virq(cpu); 5594 arm_cpu_update_vfiq(cpu); 5595 } 5596 5597 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 5598 { 5599 do_hcr_write(env, value, 0); 5600 } 5601 5602 static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri, 5603 uint64_t value) 5604 { 5605 /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */ 5606 value = deposit64(env->cp15.hcr_el2, 32, 32, value); 5607 do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32)); 5608 } 5609 5610 static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri, 5611 uint64_t value) 5612 { 5613 /* Handle HCR write, i.e. write to low half of HCR_EL2 */ 5614 value = deposit64(env->cp15.hcr_el2, 0, 32, value); 5615 do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32)); 5616 } 5617 5618 /* 5619 * Return the effective value of HCR_EL2. 5620 * Bits that are not included here: 5621 * RW (read from SCR_EL3.RW as needed) 5622 */ 5623 uint64_t arm_hcr_el2_eff(CPUARMState *env) 5624 { 5625 uint64_t ret = env->cp15.hcr_el2; 5626 5627 if (!arm_is_el2_enabled(env)) { 5628 /* 5629 * "This register has no effect if EL2 is not enabled in the 5630 * current Security state". This is ARMv8.4-SecEL2 speak for 5631 * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1). 
         *
         * Prior to that, the language was "In an implementation that
         * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
         * as if this field is 0 for all purposes other than a direct
         * read or write access of HCR_EL2". With lots of enumeration
         * on a per-field basis. In current QEMU, this condition is
         * arm_is_secure_below_el3.
         *
         * Since the v8.4 language applies to the entire register, and
         * appears to be backward compatible, use that.
         */
        return 0;
    }

    /*
     * For a cpu that supports both aarch64 and aarch32, we can set bits
     * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32.
     * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32.
     */
    if (!arm_el_is_aa64(env, 2)) {
        uint64_t aa32_valid;

        /*
         * These bits are up-to-date as of ARMv8.6.
         * For HCR, it's easiest to list just the 2 bits that are invalid.
         * For HCR2, list those that are valid.
         */
        aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ);
        aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE |
                       HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS);
        ret &= aa32_valid;
    }

    if (ret & HCR_TGE) {
        /* These bits are up-to-date as of ARMv8.6. */
        if (ret & HCR_E2H) {
            ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
                     HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
                     HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
                     HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE |
                     HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT |
                     HCR_TTLBIS | HCR_TTLBOS | HCR_TID5);
        } else {
            ret |= HCR_FMO | HCR_IMO | HCR_AMO;
        }
        ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
                 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
                 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
                 HCR_TLOR);
    }

    return ret;
}

static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0x3 << 10);
        value |= env->cp15.cptr_el[2] & (0x3 << 10);
    }
    env->cp15.cptr_el[2] = value;
}

static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
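     * (The effect is that Non-secure EL2 cannot clear these trap bits
     * while NSACR reserves the FP/SIMD unit for Secure state.)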
5706 */ 5707 uint64_t value = env->cp15.cptr_el[2]; 5708 5709 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 5710 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { 5711 value |= 0x3 << 10; 5712 } 5713 return value; 5714 } 5715 5716 static const ARMCPRegInfo el2_cp_reginfo[] = { 5717 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64, 5718 .type = ARM_CP_IO, 5719 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 5720 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), 5721 .writefn = hcr_write }, 5722 { .name = "HCR", .state = ARM_CP_STATE_AA32, 5723 .type = ARM_CP_ALIAS | ARM_CP_IO, 5724 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 5725 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), 5726 .writefn = hcr_writelow }, 5727 { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH, 5728 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7, 5729 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5730 { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64, 5731 .type = ARM_CP_ALIAS, 5732 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1, 5733 .access = PL2_RW, 5734 .fieldoffset = offsetof(CPUARMState, elr_el[2]) }, 5735 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, 5736 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, 5737 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) }, 5738 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, 5739 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, 5740 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) }, 5741 { .name = "HIFAR", .state = ARM_CP_STATE_AA32, 5742 .type = ARM_CP_ALIAS, 5743 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, 5744 .access = PL2_RW, 5745 .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) }, 5746 { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64, 5747 .type = ARM_CP_ALIAS, 5748 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0, 5749 .access = PL2_RW, 5750 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) }, 5751 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, 5752 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, 5753 .access = PL2_RW, .writefn = vbar_write, 5754 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]), 5755 .resetvalue = 0 }, 5756 { .name = "SP_EL2", .state = ARM_CP_STATE_AA64, 5757 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0, 5758 .access = PL3_RW, .type = ARM_CP_ALIAS, 5759 .fieldoffset = offsetof(CPUARMState, sp_el[2]) }, 5760 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, 5761 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, 5762 .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0, 5763 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]), 5764 .readfn = cptr_el2_read, .writefn = cptr_el2_write }, 5765 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, 5766 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, 5767 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]), 5768 .resetvalue = 0 }, 5769 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, 5770 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, 5771 .access = PL2_RW, .type = ARM_CP_ALIAS, 5772 .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) }, 5773 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, 5774 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, 5775 .access = PL2_RW, .type = ARM_CP_CONST, 5776 .resetvalue = 0 }, 5777 /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */ 5778 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, 5779 .cp = 15, .opc1 = 4, .crn = 10, 
.crm = 3, .opc2 = 1, 5780 .access = PL2_RW, .type = ARM_CP_CONST, 5781 .resetvalue = 0 }, 5782 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, 5783 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, 5784 .access = PL2_RW, .type = ARM_CP_CONST, 5785 .resetvalue = 0 }, 5786 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, 5787 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, 5788 .access = PL2_RW, .type = ARM_CP_CONST, 5789 .resetvalue = 0 }, 5790 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, 5791 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, 5792 .access = PL2_RW, .writefn = vmsa_tcr_el12_write, 5793 /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */ 5794 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) }, 5795 { .name = "VTCR", .state = ARM_CP_STATE_AA32, 5796 .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 5797 .type = ARM_CP_ALIAS, 5798 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5799 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 5800 { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64, 5801 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 5802 .access = PL2_RW, 5803 /* no .writefn needed as this can't cause an ASID change; 5804 * no .raw_writefn or .resetfn needed as we never use mask/base_mask 5805 */ 5806 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 5807 { .name = "VTTBR", .state = ARM_CP_STATE_AA32, 5808 .cp = 15, .opc1 = 6, .crm = 2, 5809 .type = ARM_CP_64BIT | ARM_CP_ALIAS, 5810 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5811 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2), 5812 .writefn = vttbr_write }, 5813 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, 5814 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, 5815 .access = PL2_RW, .writefn = vttbr_write, 5816 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) }, 5817 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, 5818 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, 5819 .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write, 5820 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) }, 5821 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, 5822 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, 5823 .access = PL2_RW, .resetvalue = 0, 5824 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) }, 5825 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, 5826 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 5827 .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write, 5828 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 5829 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, 5830 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 5831 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 5832 { .name = "TLBIALLNSNH", 5833 .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, 5834 .type = ARM_CP_NO_RAW, .access = PL2_W, 5835 .writefn = tlbiall_nsnh_write }, 5836 { .name = "TLBIALLNSNHIS", 5837 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, 5838 .type = ARM_CP_NO_RAW, .access = PL2_W, 5839 .writefn = tlbiall_nsnh_is_write }, 5840 { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, 5841 .type = ARM_CP_NO_RAW, .access = PL2_W, 5842 .writefn = tlbiall_hyp_write }, 5843 { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, 5844 .type = ARM_CP_NO_RAW, .access = PL2_W, 5845 .writefn = tlbiall_hyp_is_write }, 5846 { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, 5847 .type = ARM_CP_NO_RAW, .access = PL2_W, 5848 .writefn 
= tlbimva_hyp_write }, 5849 { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, 5850 .type = ARM_CP_NO_RAW, .access = PL2_W, 5851 .writefn = tlbimva_hyp_is_write }, 5852 { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64, 5853 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, 5854 .type = ARM_CP_NO_RAW, .access = PL2_W, 5855 .writefn = tlbi_aa64_alle2_write }, 5856 { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64, 5857 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, 5858 .type = ARM_CP_NO_RAW, .access = PL2_W, 5859 .writefn = tlbi_aa64_vae2_write }, 5860 { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64, 5861 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, 5862 .access = PL2_W, .type = ARM_CP_NO_RAW, 5863 .writefn = tlbi_aa64_vae2_write }, 5864 { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64, 5865 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, 5866 .access = PL2_W, .type = ARM_CP_NO_RAW, 5867 .writefn = tlbi_aa64_alle2is_write }, 5868 { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64, 5869 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, 5870 .type = ARM_CP_NO_RAW, .access = PL2_W, 5871 .writefn = tlbi_aa64_vae2is_write }, 5872 { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64, 5873 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, 5874 .access = PL2_W, .type = ARM_CP_NO_RAW, 5875 .writefn = tlbi_aa64_vae2is_write }, 5876 #ifndef CONFIG_USER_ONLY 5877 /* Unlike the other EL2-related AT operations, these must 5878 * UNDEF from EL3 if EL2 is not implemented, which is why we 5879 * define them here rather than with the rest of the AT ops. 5880 */ 5881 { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64, 5882 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 5883 .access = PL2_W, .accessfn = at_s1e2_access, 5884 .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, 5885 { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64, 5886 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 5887 .access = PL2_W, .accessfn = at_s1e2_access, 5888 .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, 5889 /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE 5890 * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3 5891 * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose 5892 * to behave as if SCR.NS was 1. 5893 */ 5894 { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 5895 .access = PL2_W, 5896 .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, 5897 { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 5898 .access = PL2_W, 5899 .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, 5900 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, 5901 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, 5902 /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the 5903 * reset values as IMPDEF. We choose to reset to 3 to comply with 5904 * both ARMv7 and ARMv8. 
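       * (A reset value of 3 sets EL1PCTEN and EL1PCEN, so EL1 and EL0
       * accesses to the physical counter and timer are not trapped to
       * EL2 out of reset.)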
5905 */ 5906 .access = PL2_RW, .resetvalue = 3, 5907 .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) }, 5908 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, 5909 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, 5910 .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0, 5911 .writefn = gt_cntvoff_write, 5912 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, 5913 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, 5914 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO, 5915 .writefn = gt_cntvoff_write, 5916 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, 5917 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, 5918 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, 5919 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), 5920 .type = ARM_CP_IO, .access = PL2_RW, 5921 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, 5922 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, 5923 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), 5924 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO, 5925 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, 5926 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 5927 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, 5928 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, 5929 .resetfn = gt_hyp_timer_reset, 5930 .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write }, 5931 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, 5932 .type = ARM_CP_IO, 5933 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, 5934 .access = PL2_RW, 5935 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl), 5936 .resetvalue = 0, 5937 .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write }, 5938 #endif 5939 /* The only field of MDCR_EL2 that has a defined architectural reset value 5940 * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N. 
5941 */ 5942 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, 5943 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, 5944 .access = PL2_RW, .resetvalue = PMCR_NUM_COUNTERS, 5945 .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), }, 5946 { .name = "HPFAR", .state = ARM_CP_STATE_AA32, 5947 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 5948 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5949 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, 5950 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64, 5951 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 5952 .access = PL2_RW, 5953 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, 5954 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, 5955 .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, 5956 .access = PL2_RW, 5957 .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) }, 5958 REGINFO_SENTINEL 5959 }; 5960 5961 static const ARMCPRegInfo el2_v8_cp_reginfo[] = { 5962 { .name = "HCR2", .state = ARM_CP_STATE_AA32, 5963 .type = ARM_CP_ALIAS | ARM_CP_IO, 5964 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, 5965 .access = PL2_RW, 5966 .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2), 5967 .writefn = hcr_writehigh }, 5968 REGINFO_SENTINEL 5969 }; 5970 5971 static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri, 5972 bool isread) 5973 { 5974 if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) { 5975 return CP_ACCESS_OK; 5976 } 5977 return CP_ACCESS_TRAP_UNCATEGORIZED; 5978 } 5979 5980 static const ARMCPRegInfo el2_sec_cp_reginfo[] = { 5981 { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64, 5982 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0, 5983 .access = PL2_RW, .accessfn = sel2_access, 5984 .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) }, 5985 { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64, 5986 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2, 5987 .access = PL2_RW, .accessfn = sel2_access, 5988 .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) }, 5989 REGINFO_SENTINEL 5990 }; 5991 5992 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri, 5993 bool isread) 5994 { 5995 /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2. 5996 * At Secure EL1 it traps to EL3 or EL2. 5997 */ 5998 if (arm_current_el(env) == 3) { 5999 return CP_ACCESS_OK; 6000 } 6001 if (arm_is_secure_below_el3(env)) { 6002 if (env->cp15.scr_el3 & SCR_EEL2) { 6003 return CP_ACCESS_TRAP_EL2; 6004 } 6005 return CP_ACCESS_TRAP_EL3; 6006 } 6007 /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. 
*/ 6008 if (isread) { 6009 return CP_ACCESS_OK; 6010 } 6011 return CP_ACCESS_TRAP_UNCATEGORIZED; 6012 } 6013 6014 static const ARMCPRegInfo el3_cp_reginfo[] = { 6015 { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64, 6016 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0, 6017 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3), 6018 .resetfn = scr_reset, .writefn = scr_write }, 6019 { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL, 6020 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0, 6021 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 6022 .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3), 6023 .writefn = scr_write }, 6024 { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64, 6025 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1, 6026 .access = PL3_RW, .resetvalue = 0, 6027 .fieldoffset = offsetof(CPUARMState, cp15.sder) }, 6028 { .name = "SDER", 6029 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1, 6030 .access = PL3_RW, .resetvalue = 0, 6031 .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) }, 6032 { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 6033 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 6034 .writefn = vbar_write, .resetvalue = 0, 6035 .fieldoffset = offsetof(CPUARMState, cp15.mvbar) }, 6036 { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64, 6037 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0, 6038 .access = PL3_RW, .resetvalue = 0, 6039 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) }, 6040 { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64, 6041 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2, 6042 .access = PL3_RW, 6043 /* no .writefn needed as this can't cause an ASID change; 6044 * we must provide a .raw_writefn and .resetfn because we handle 6045 * reset and migration for the AArch32 TTBCR(S), which might be 6046 * using mask and base_mask. 
6047 */ 6048 .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write, 6049 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) }, 6050 { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64, 6051 .type = ARM_CP_ALIAS, 6052 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1, 6053 .access = PL3_RW, 6054 .fieldoffset = offsetof(CPUARMState, elr_el[3]) }, 6055 { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64, 6056 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0, 6057 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) }, 6058 { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64, 6059 .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0, 6060 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) }, 6061 { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64, 6062 .type = ARM_CP_ALIAS, 6063 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0, 6064 .access = PL3_RW, 6065 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) }, 6066 { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64, 6067 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0, 6068 .access = PL3_RW, .writefn = vbar_write, 6069 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]), 6070 .resetvalue = 0 }, 6071 { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64, 6072 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2, 6073 .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0, 6074 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) }, 6075 { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64, 6076 .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2, 6077 .access = PL3_RW, .resetvalue = 0, 6078 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) }, 6079 { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64, 6080 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0, 6081 .access = PL3_RW, .type = ARM_CP_CONST, 6082 .resetvalue = 0 }, 6083 { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH, 6084 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0, 6085 .access = PL3_RW, .type = ARM_CP_CONST, 6086 .resetvalue = 0 }, 6087 { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH, 6088 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1, 6089 .access = PL3_RW, .type = ARM_CP_CONST, 6090 .resetvalue = 0 }, 6091 { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64, 6092 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0, 6093 .access = PL3_W, .type = ARM_CP_NO_RAW, 6094 .writefn = tlbi_aa64_alle3is_write }, 6095 { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64, 6096 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1, 6097 .access = PL3_W, .type = ARM_CP_NO_RAW, 6098 .writefn = tlbi_aa64_vae3is_write }, 6099 { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64, 6100 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5, 6101 .access = PL3_W, .type = ARM_CP_NO_RAW, 6102 .writefn = tlbi_aa64_vae3is_write }, 6103 { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64, 6104 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0, 6105 .access = PL3_W, .type = ARM_CP_NO_RAW, 6106 .writefn = tlbi_aa64_alle3_write }, 6107 { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64, 6108 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1, 6109 .access = PL3_W, .type = ARM_CP_NO_RAW, 6110 .writefn = tlbi_aa64_vae3_write }, 6111 { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64, 6112 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5, 6113 .access = PL3_W, .type = ARM_CP_NO_RAW, 6114 .writefn = tlbi_aa64_vae3_write }, 6115 REGINFO_SENTINEL 6116 }; 6117 6118 #ifndef CONFIG_USER_ONLY 6119 /* Test if system register redirection is to 
occur in the current state. */ 6120 static bool redirect_for_e2h(CPUARMState *env) 6121 { 6122 return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H); 6123 } 6124 6125 static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri) 6126 { 6127 CPReadFn *readfn; 6128 6129 if (redirect_for_e2h(env)) { 6130 /* Switch to the saved EL2 version of the register. */ 6131 ri = ri->opaque; 6132 readfn = ri->readfn; 6133 } else { 6134 readfn = ri->orig_readfn; 6135 } 6136 if (readfn == NULL) { 6137 readfn = raw_read; 6138 } 6139 return readfn(env, ri); 6140 } 6141 6142 static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri, 6143 uint64_t value) 6144 { 6145 CPWriteFn *writefn; 6146 6147 if (redirect_for_e2h(env)) { 6148 /* Switch to the saved EL2 version of the register. */ 6149 ri = ri->opaque; 6150 writefn = ri->writefn; 6151 } else { 6152 writefn = ri->orig_writefn; 6153 } 6154 if (writefn == NULL) { 6155 writefn = raw_write; 6156 } 6157 writefn(env, ri, value); 6158 } 6159 6160 static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu) 6161 { 6162 struct E2HAlias { 6163 uint32_t src_key, dst_key, new_key; 6164 const char *src_name, *dst_name, *new_name; 6165 bool (*feature)(const ARMISARegisters *id); 6166 }; 6167 6168 #define K(op0, op1, crn, crm, op2) \ 6169 ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2) 6170 6171 static const struct E2HAlias aliases[] = { 6172 { K(3, 0, 1, 0, 0), K(3, 4, 1, 0, 0), K(3, 5, 1, 0, 0), 6173 "SCTLR", "SCTLR_EL2", "SCTLR_EL12" }, 6174 { K(3, 0, 1, 0, 2), K(3, 4, 1, 1, 2), K(3, 5, 1, 0, 2), 6175 "CPACR", "CPTR_EL2", "CPACR_EL12" }, 6176 { K(3, 0, 2, 0, 0), K(3, 4, 2, 0, 0), K(3, 5, 2, 0, 0), 6177 "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" }, 6178 { K(3, 0, 2, 0, 1), K(3, 4, 2, 0, 1), K(3, 5, 2, 0, 1), 6179 "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" }, 6180 { K(3, 0, 2, 0, 2), K(3, 4, 2, 0, 2), K(3, 5, 2, 0, 2), 6181 "TCR_EL1", "TCR_EL2", "TCR_EL12" }, 6182 { K(3, 0, 4, 0, 0), K(3, 4, 4, 0, 0), K(3, 5, 4, 0, 0), 6183 "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" }, 6184 { K(3, 0, 4, 0, 1), K(3, 4, 4, 0, 1), K(3, 5, 4, 0, 1), 6185 "ELR_EL1", "ELR_EL2", "ELR_EL12" }, 6186 { K(3, 0, 5, 1, 0), K(3, 4, 5, 1, 0), K(3, 5, 5, 1, 0), 6187 "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" }, 6188 { K(3, 0, 5, 1, 1), K(3, 4, 5, 1, 1), K(3, 5, 5, 1, 1), 6189 "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" }, 6190 { K(3, 0, 5, 2, 0), K(3, 4, 5, 2, 0), K(3, 5, 5, 2, 0), 6191 "ESR_EL1", "ESR_EL2", "ESR_EL12" }, 6192 { K(3, 0, 6, 0, 0), K(3, 4, 6, 0, 0), K(3, 5, 6, 0, 0), 6193 "FAR_EL1", "FAR_EL2", "FAR_EL12" }, 6194 { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0), 6195 "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" }, 6196 { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0), 6197 "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" }, 6198 { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0), 6199 "VBAR", "VBAR_EL2", "VBAR_EL12" }, 6200 { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1), 6201 "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" }, 6202 { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0), 6203 "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" }, 6204 6205 /* 6206 * Note that redirection of ZCR is mentioned in the description 6207 * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but 6208 * not in the summary table. 
6209 */ 6210 { K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0), 6211 "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve }, 6212 6213 { K(3, 0, 5, 6, 0), K(3, 4, 5, 6, 0), K(3, 5, 5, 6, 0), 6214 "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte }, 6215 6216 /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */ 6217 /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */ 6218 }; 6219 #undef K 6220 6221 size_t i; 6222 6223 for (i = 0; i < ARRAY_SIZE(aliases); i++) { 6224 const struct E2HAlias *a = &aliases[i]; 6225 ARMCPRegInfo *src_reg, *dst_reg; 6226 6227 if (a->feature && !a->feature(&cpu->isar)) { 6228 continue; 6229 } 6230 6231 src_reg = g_hash_table_lookup(cpu->cp_regs, &a->src_key); 6232 dst_reg = g_hash_table_lookup(cpu->cp_regs, &a->dst_key); 6233 g_assert(src_reg != NULL); 6234 g_assert(dst_reg != NULL); 6235 6236 /* Cross-compare names to detect typos in the keys. */ 6237 g_assert(strcmp(src_reg->name, a->src_name) == 0); 6238 g_assert(strcmp(dst_reg->name, a->dst_name) == 0); 6239 6240 /* None of the core system registers use opaque; we will. */ 6241 g_assert(src_reg->opaque == NULL); 6242 6243 /* Create alias before redirection so we dup the right data. */ 6244 if (a->new_key) { 6245 ARMCPRegInfo *new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo)); 6246 uint32_t *new_key = g_memdup(&a->new_key, sizeof(uint32_t)); 6247 bool ok; 6248 6249 new_reg->name = a->new_name; 6250 new_reg->type |= ARM_CP_ALIAS; 6251 /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */ 6252 new_reg->access &= PL2_RW | PL3_RW; 6253 6254 ok = g_hash_table_insert(cpu->cp_regs, new_key, new_reg); 6255 g_assert(ok); 6256 } 6257 6258 src_reg->opaque = dst_reg; 6259 src_reg->orig_readfn = src_reg->readfn ?: raw_read; 6260 src_reg->orig_writefn = src_reg->writefn ?: raw_write; 6261 if (!src_reg->raw_readfn) { 6262 src_reg->raw_readfn = raw_read; 6263 } 6264 if (!src_reg->raw_writefn) { 6265 src_reg->raw_writefn = raw_write; 6266 } 6267 src_reg->readfn = el2_e2h_read; 6268 src_reg->writefn = el2_e2h_write; 6269 } 6270 } 6271 #endif 6272 6273 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, 6274 bool isread) 6275 { 6276 int cur_el = arm_current_el(env); 6277 6278 if (cur_el < 2) { 6279 uint64_t hcr = arm_hcr_el2_eff(env); 6280 6281 if (cur_el == 0) { 6282 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 6283 if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) { 6284 return CP_ACCESS_TRAP_EL2; 6285 } 6286 } else { 6287 if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) { 6288 return CP_ACCESS_TRAP; 6289 } 6290 if (hcr & HCR_TID2) { 6291 return CP_ACCESS_TRAP_EL2; 6292 } 6293 } 6294 } else if (hcr & HCR_TID2) { 6295 return CP_ACCESS_TRAP_EL2; 6296 } 6297 } 6298 6299 if (arm_current_el(env) < 2 && arm_hcr_el2_eff(env) & HCR_TID2) { 6300 return CP_ACCESS_TRAP_EL2; 6301 } 6302 6303 return CP_ACCESS_OK; 6304 } 6305 6306 static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri, 6307 uint64_t value) 6308 { 6309 /* Writes to OSLAR_EL1 may update the OS lock status, which can be 6310 * read via a bit in OSLSR_EL1. 6311 */ 6312 int oslock; 6313 6314 if (ri->state == ARM_CP_STATE_AA32) { 6315 oslock = (value == 0xC5ACCE55); 6316 } else { 6317 oslock = value & 1; 6318 } 6319 6320 env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock); 6321 } 6322 6323 static const ARMCPRegInfo debug_cp_reginfo[] = { 6324 /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped 6325 * debug components. 
The AArch64 version of DBGDRAR is named MDRAR_EL1; 6326 * unlike DBGDRAR it is never accessible from EL0. 6327 * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64 6328 * accessor. 6329 */ 6330 { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0, 6331 .access = PL0_R, .accessfn = access_tdra, 6332 .type = ARM_CP_CONST, .resetvalue = 0 }, 6333 { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64, 6334 .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 6335 .access = PL1_R, .accessfn = access_tdra, 6336 .type = ARM_CP_CONST, .resetvalue = 0 }, 6337 { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 6338 .access = PL0_R, .accessfn = access_tdra, 6339 .type = ARM_CP_CONST, .resetvalue = 0 }, 6340 /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */ 6341 { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH, 6342 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 6343 .access = PL1_RW, .accessfn = access_tda, 6344 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), 6345 .resetvalue = 0 }, 6346 /* 6347 * MDCCSR_EL0[30:29] map to EDSCR[30:29]. Simply RAZ as the external 6348 * Debug Communication Channel is not implemented. 6349 */ 6350 { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_AA64, 6351 .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 1, .opc2 = 0, 6352 .access = PL0_R, .accessfn = access_tda, 6353 .type = ARM_CP_CONST, .resetvalue = 0 }, 6354 /* 6355 * DBGDSCRint[15,12,5:2] map to MDSCR_EL1[15,12,5:2]. Map all bits as 6356 * it is unlikely a guest will care. 6357 * We don't implement the configurable EL0 access. 6358 */ 6359 { .name = "DBGDSCRint", .state = ARM_CP_STATE_AA32, 6360 .cp = 14, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 6361 .type = ARM_CP_ALIAS, 6362 .access = PL1_R, .accessfn = access_tda, 6363 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), }, 6364 { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH, 6365 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4, 6366 .access = PL1_W, .type = ARM_CP_NO_RAW, 6367 .accessfn = access_tdosa, 6368 .writefn = oslar_write }, 6369 { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH, 6370 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4, 6371 .access = PL1_R, .resetvalue = 10, 6372 .accessfn = access_tdosa, 6373 .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) }, 6374 /* Dummy OSDLR_EL1: 32-bit Linux will read this */ 6375 { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH, 6376 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4, 6377 .access = PL1_RW, .accessfn = access_tdosa, 6378 .type = ARM_CP_NOP }, 6379 /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't 6380 * implement vector catch debug events yet. 6381 */ 6382 { .name = "DBGVCR", 6383 .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 6384 .access = PL1_RW, .accessfn = access_tda, 6385 .type = ARM_CP_NOP }, 6386 /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor 6387 * to save and restore a 32-bit guest's DBGVCR) 6388 */ 6389 { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, 6390 .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, 6391 .access = PL2_RW, .accessfn = access_tda, 6392 .type = ARM_CP_NOP }, 6393 /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications 6394 * Channel but Linux may try to access this register. The 32-bit 6395 * alias is DBGDCCINT. 
6396 */ 6397 { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH, 6398 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 6399 .access = PL1_RW, .accessfn = access_tda, 6400 .type = ARM_CP_NOP }, 6401 REGINFO_SENTINEL 6402 }; 6403 6404 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { 6405 /* 64 bit access versions of the (dummy) debug registers */ 6406 { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0, 6407 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, 6408 { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0, 6409 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, 6410 REGINFO_SENTINEL 6411 }; 6412 6413 /* Return the exception level to which exceptions should be taken 6414 * via SVEAccessTrap. If an exception should be routed through 6415 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should 6416 * take care of raising that exception. 6417 * C.f. the ARM pseudocode function CheckSVEEnabled. 6418 */ 6419 int sve_exception_el(CPUARMState *env, int el) 6420 { 6421 #ifndef CONFIG_USER_ONLY 6422 uint64_t hcr_el2 = arm_hcr_el2_eff(env); 6423 6424 if (el <= 1 && (hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { 6425 bool disabled = false; 6426 6427 /* The CPACR.ZEN controls traps to EL1: 6428 * 0, 2 : trap EL0 and EL1 accesses 6429 * 1 : trap only EL0 accesses 6430 * 3 : trap no accesses 6431 */ 6432 if (!extract32(env->cp15.cpacr_el1, 16, 1)) { 6433 disabled = true; 6434 } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) { 6435 disabled = el == 0; 6436 } 6437 if (disabled) { 6438 /* route_to_el2 */ 6439 return hcr_el2 & HCR_TGE ? 2 : 1; 6440 } 6441 6442 /* Check CPACR.FPEN. */ 6443 if (!extract32(env->cp15.cpacr_el1, 20, 1)) { 6444 disabled = true; 6445 } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) { 6446 disabled = el == 0; 6447 } 6448 if (disabled) { 6449 return 0; 6450 } 6451 } 6452 6453 /* CPTR_EL2. Since TZ and TFP are positive, 6454 * they will be zero when EL2 is not present. 6455 */ 6456 if (el <= 2 && arm_is_el2_enabled(env)) { 6457 if (env->cp15.cptr_el[2] & CPTR_TZ) { 6458 return 2; 6459 } 6460 if (env->cp15.cptr_el[2] & CPTR_TFP) { 6461 return 0; 6462 } 6463 } 6464 6465 /* CPTR_EL3. Since EZ is negative we must check for EL3. */ 6466 if (arm_feature(env, ARM_FEATURE_EL3) 6467 && !(env->cp15.cptr_el[3] & CPTR_EZ)) { 6468 return 3; 6469 } 6470 #endif 6471 return 0; 6472 } 6473 6474 uint32_t aarch64_sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len) 6475 { 6476 uint32_t end_len; 6477 6478 start_len = MIN(start_len, ARM_MAX_VQ - 1); 6479 end_len = start_len; 6480 6481 if (!test_bit(start_len, cpu->sve_vq_map)) { 6482 end_len = find_last_bit(cpu->sve_vq_map, start_len); 6483 assert(end_len < start_len); 6484 } 6485 return end_len; 6486 } 6487 6488 /* 6489 * Given that SVE is enabled, return the vector length for EL. 
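 * The value returned is in the same form as ZCR_ELx.LEN: the vector
 * length in 128-bit quadwords, minus one.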
 */
uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t zcr_len = cpu->sve_max_vq - 1;

    if (el <= 1) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
    }
    if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
    }

    return aarch64_sve_zcr_get_valid_len(cpu, zcr_len);
}

static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    int cur_el = arm_current_el(env);
    int old_len = sve_zcr_len_for_el(env, cur_el);
    int new_len;

    /* Bits other than [3:0] are RAZ/WI. */
    QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
    raw_write(env, ri, value & 0xf);

    /*
     * Because we arrived here, we know both FP and SVE are enabled;
     * otherwise we would have trapped access to the ZCR_ELn register.
     */
    new_len = sve_zcr_len_for_el(env, cur_el);
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}

static const ARMCPRegInfo zcr_el1_reginfo = {
    .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL1_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_no_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
};

static const ARMCPRegInfo zcr_el3_reginfo = {
    .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL3_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!extract64(wcr, 0, 1)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    switch (extract64(wcr, 3, 2)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /* Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
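     * For example, MASK = 3 yields an 8-byte watchpoint on the address in
     * WVR with its low 3 bits cleared, regardless of BAS.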
     */
    mask = extract64(wcr, 24, 4);
    if (mask == 1 || mask == 2) {
        /* Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = extract64(wcr, 5, 8);
        int basstart;

        if (extract64(wvr, 2, 1)) {
            /* Deprecated case of an only 4-aligned address. BAS[7:4] are
             * ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        /* The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}

void hw_watchpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}

static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
     * register reads and behaves as if values written are sign extended.
     * Bits [1:0] are RES0.
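     * (For example, a write of 0x0001_8000_0000_0000 is stored back as
     * 0xffff_8000_0000_0000, with bits [1:0] also forced to zero.)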
     */
    value = sextract64(value, 0, 49) & ~3ULL;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}

static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}

void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : breakpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented\n");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
         * we behave as if the register was sign extended. Bits [1:0] are
         * RES0. The BAS field is used to allow setting breakpoints on 16
         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000 => no breakpoint
         *  0b0011 => breakpoint on addr
         *  0b1100 => breakpoint on addr + 2
         *  0b1111 => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = sextract64(bvr, 0, 49) & ~3ULL;
        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented\n");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /* We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}

void hw_breakpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU breakpoints and our array, to
     * avoid possible stale entries following migration load.
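     * hw_breakpoint_update() below then rebuilds the QEMU breakpoints from
     * the dbgbvr/dbgbcr values that migration has just loaded.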
6770 */ 6771 cpu_breakpoint_remove_all(CPU(cpu), BP_CPU); 6772 memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint)); 6773 6774 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) { 6775 hw_breakpoint_update(cpu, i); 6776 } 6777 } 6778 6779 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6780 uint64_t value) 6781 { 6782 ARMCPU *cpu = env_archcpu(env); 6783 int i = ri->crm; 6784 6785 raw_write(env, ri, value); 6786 hw_breakpoint_update(cpu, i); 6787 } 6788 6789 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6790 uint64_t value) 6791 { 6792 ARMCPU *cpu = env_archcpu(env); 6793 int i = ri->crm; 6794 6795 /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only 6796 * copy of BAS[0]. 6797 */ 6798 value = deposit64(value, 6, 1, extract64(value, 5, 1)); 6799 value = deposit64(value, 8, 1, extract64(value, 7, 1)); 6800 6801 raw_write(env, ri, value); 6802 hw_breakpoint_update(cpu, i); 6803 } 6804 6805 static void define_debug_regs(ARMCPU *cpu) 6806 { 6807 /* Define v7 and v8 architectural debug registers. 6808 * These are just dummy implementations for now. 6809 */ 6810 int i; 6811 int wrps, brps, ctx_cmps; 6812 6813 /* 6814 * The Arm ARM says DBGDIDR is optional and deprecated if EL1 cannot 6815 * use AArch32. Given that bit 15 is RES1, if the value is 0 then 6816 * the register must not exist for this cpu. 6817 */ 6818 if (cpu->isar.dbgdidr != 0) { 6819 ARMCPRegInfo dbgdidr = { 6820 .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, 6821 .opc1 = 0, .opc2 = 0, 6822 .access = PL0_R, .accessfn = access_tda, 6823 .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr, 6824 }; 6825 define_one_arm_cp_reg(cpu, &dbgdidr); 6826 } 6827 6828 /* Note that all these register fields hold "number of Xs minus 1". */ 6829 brps = arm_num_brps(cpu); 6830 wrps = arm_num_wrps(cpu); 6831 ctx_cmps = arm_num_ctx_cmps(cpu); 6832 6833 assert(ctx_cmps <= brps); 6834 6835 define_arm_cp_regs(cpu, debug_cp_reginfo); 6836 6837 if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { 6838 define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); 6839 } 6840 6841 for (i = 0; i < brps; i++) { 6842 ARMCPRegInfo dbgregs[] = { 6843 { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH, 6844 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4, 6845 .access = PL1_RW, .accessfn = access_tda, 6846 .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]), 6847 .writefn = dbgbvr_write, .raw_writefn = raw_write 6848 }, 6849 { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH, 6850 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5, 6851 .access = PL1_RW, .accessfn = access_tda, 6852 .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]), 6853 .writefn = dbgbcr_write, .raw_writefn = raw_write 6854 }, 6855 REGINFO_SENTINEL 6856 }; 6857 define_arm_cp_regs(cpu, dbgregs); 6858 } 6859 6860 for (i = 0; i < wrps; i++) { 6861 ARMCPRegInfo dbgregs[] = { 6862 { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH, 6863 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6, 6864 .access = PL1_RW, .accessfn = access_tda, 6865 .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]), 6866 .writefn = dbgwvr_write, .raw_writefn = raw_write 6867 }, 6868 { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH, 6869 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7, 6870 .access = PL1_RW, .accessfn = access_tda, 6871 .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]), 6872 .writefn = dbgwcr_write, .raw_writefn = raw_write 6873 }, 6874 REGINFO_SENTINEL 6875 }; 6876 define_arm_cp_regs(cpu, dbgregs); 6877 } 6878 } 6879 
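/*
 * The per-event PMU registers below are generated at run time: for counter
 * n, PMEVCNTR<n>/PMEVTYPER<n> are encoded with n[4:3] in the low bits of
 * CRm (offset from 0b1000 or 0b1100 respectively) and n[2:0] in opc2,
 * which is what the "8 | (3 & (i >> 3))" and "i & 7" expressions compute.
 */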
6880 static void define_pmu_regs(ARMCPU *cpu) 6881 { 6882 /* 6883 * v7 performance monitor control register: same implementor 6884 * field as main ID register, and we implement four counters in 6885 * addition to the cycle count register. 6886 */ 6887 unsigned int i, pmcrn = PMCR_NUM_COUNTERS; 6888 ARMCPRegInfo pmcr = { 6889 .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0, 6890 .access = PL0_RW, 6891 .type = ARM_CP_IO | ARM_CP_ALIAS, 6892 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr), 6893 .accessfn = pmreg_access, .writefn = pmcr_write, 6894 .raw_writefn = raw_write, 6895 }; 6896 ARMCPRegInfo pmcr64 = { 6897 .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64, 6898 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0, 6899 .access = PL0_RW, .accessfn = pmreg_access, 6900 .type = ARM_CP_IO, 6901 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), 6902 .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT) | 6903 PMCRLC, 6904 .writefn = pmcr_write, .raw_writefn = raw_write, 6905 }; 6906 define_one_arm_cp_reg(cpu, &pmcr); 6907 define_one_arm_cp_reg(cpu, &pmcr64); 6908 for (i = 0; i < pmcrn; i++) { 6909 char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i); 6910 char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i); 6911 char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i); 6912 char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i); 6913 ARMCPRegInfo pmev_regs[] = { 6914 { .name = pmevcntr_name, .cp = 15, .crn = 14, 6915 .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, 6916 .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, 6917 .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, 6918 .accessfn = pmreg_access }, 6919 { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64, 6920 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)), 6921 .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, 6922 .type = ARM_CP_IO, 6923 .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, 6924 .raw_readfn = pmevcntr_rawread, 6925 .raw_writefn = pmevcntr_rawwrite }, 6926 { .name = pmevtyper_name, .cp = 15, .crn = 14, 6927 .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, 6928 .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, 6929 .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, 6930 .accessfn = pmreg_access }, 6931 { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64, 6932 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)), 6933 .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, 6934 .type = ARM_CP_IO, 6935 .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, 6936 .raw_writefn = pmevtyper_rawwrite }, 6937 REGINFO_SENTINEL 6938 }; 6939 define_arm_cp_regs(cpu, pmev_regs); 6940 g_free(pmevcntr_name); 6941 g_free(pmevcntr_el0_name); 6942 g_free(pmevtyper_name); 6943 g_free(pmevtyper_el0_name); 6944 } 6945 if (cpu_isar_feature(aa32_pmu_8_1, cpu)) { 6946 ARMCPRegInfo v81_pmu_regs[] = { 6947 { .name = "PMCEID2", .state = ARM_CP_STATE_AA32, 6948 .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4, 6949 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 6950 .resetvalue = extract64(cpu->pmceid0, 32, 32) }, 6951 { .name = "PMCEID3", .state = ARM_CP_STATE_AA32, 6952 .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5, 6953 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 6954 .resetvalue = extract64(cpu->pmceid1, 32, 32) }, 6955 REGINFO_SENTINEL 6956 }; 6957 define_arm_cp_regs(cpu, v81_pmu_regs); 6958 } 6959 if (cpu_isar_feature(any_pmu_8_4, cpu)) { 6960 static const 
ARMCPRegInfo v84_pmmir = { 6961 .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH, 6962 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6, 6963 .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 6964 .resetvalue = 0 6965 }; 6966 define_one_arm_cp_reg(cpu, &v84_pmmir); 6967 } 6968 } 6969 6970 /* We don't know until after realize whether there's a GICv3 6971 * attached, and that is what registers the gicv3 sysregs. 6972 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1 6973 * at runtime. 6974 */ 6975 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) 6976 { 6977 ARMCPU *cpu = env_archcpu(env); 6978 uint64_t pfr1 = cpu->isar.id_pfr1; 6979 6980 if (env->gicv3state) { 6981 pfr1 |= 1 << 28; 6982 } 6983 return pfr1; 6984 } 6985 6986 #ifndef CONFIG_USER_ONLY 6987 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) 6988 { 6989 ARMCPU *cpu = env_archcpu(env); 6990 uint64_t pfr0 = cpu->isar.id_aa64pfr0; 6991 6992 if (env->gicv3state) { 6993 pfr0 |= 1 << 24; 6994 } 6995 return pfr0; 6996 } 6997 #endif 6998 6999 /* Shared logic between LORID and the rest of the LOR* registers. 7000 * Secure state exclusion has already been dealt with. 7001 */ 7002 static CPAccessResult access_lor_ns(CPUARMState *env, 7003 const ARMCPRegInfo *ri, bool isread) 7004 { 7005 int el = arm_current_el(env); 7006 7007 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) { 7008 return CP_ACCESS_TRAP_EL2; 7009 } 7010 if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) { 7011 return CP_ACCESS_TRAP_EL3; 7012 } 7013 return CP_ACCESS_OK; 7014 } 7015 7016 static CPAccessResult access_lor_other(CPUARMState *env, 7017 const ARMCPRegInfo *ri, bool isread) 7018 { 7019 if (arm_is_secure_below_el3(env)) { 7020 /* Access denied in secure mode. */ 7021 return CP_ACCESS_TRAP; 7022 } 7023 return access_lor_ns(env, ri, isread); 7024 } 7025 7026 /* 7027 * A trivial implementation of ARMv8.1-LOR leaves all of these 7028 * registers fixed at 0, which indicates that there are zero 7029 * supported Limited Ordering regions. 
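 * With LORID_EL1 reading as zero, a guest sees no LORegions to configure,
 * so the remaining LOR* registers can simply be constant zero.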
7030 */ 7031 static const ARMCPRegInfo lor_reginfo[] = { 7032 { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64, 7033 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0, 7034 .access = PL1_RW, .accessfn = access_lor_other, 7035 .type = ARM_CP_CONST, .resetvalue = 0 }, 7036 { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64, 7037 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1, 7038 .access = PL1_RW, .accessfn = access_lor_other, 7039 .type = ARM_CP_CONST, .resetvalue = 0 }, 7040 { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64, 7041 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2, 7042 .access = PL1_RW, .accessfn = access_lor_other, 7043 .type = ARM_CP_CONST, .resetvalue = 0 }, 7044 { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64, 7045 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3, 7046 .access = PL1_RW, .accessfn = access_lor_other, 7047 .type = ARM_CP_CONST, .resetvalue = 0 }, 7048 { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64, 7049 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7, 7050 .access = PL1_R, .accessfn = access_lor_ns, 7051 .type = ARM_CP_CONST, .resetvalue = 0 }, 7052 REGINFO_SENTINEL 7053 }; 7054 7055 #ifdef TARGET_AARCH64 7056 static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri, 7057 bool isread) 7058 { 7059 int el = arm_current_el(env); 7060 7061 if (el < 2 && 7062 arm_feature(env, ARM_FEATURE_EL2) && 7063 !(arm_hcr_el2_eff(env) & HCR_APK)) { 7064 return CP_ACCESS_TRAP_EL2; 7065 } 7066 if (el < 3 && 7067 arm_feature(env, ARM_FEATURE_EL3) && 7068 !(env->cp15.scr_el3 & SCR_APK)) { 7069 return CP_ACCESS_TRAP_EL3; 7070 } 7071 return CP_ACCESS_OK; 7072 } 7073 7074 static const ARMCPRegInfo pauth_reginfo[] = { 7075 { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64, 7076 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0, 7077 .access = PL1_RW, .accessfn = access_pauth, 7078 .fieldoffset = offsetof(CPUARMState, keys.apda.lo) }, 7079 { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64, 7080 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1, 7081 .access = PL1_RW, .accessfn = access_pauth, 7082 .fieldoffset = offsetof(CPUARMState, keys.apda.hi) }, 7083 { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64, 7084 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2, 7085 .access = PL1_RW, .accessfn = access_pauth, 7086 .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) }, 7087 { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64, 7088 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3, 7089 .access = PL1_RW, .accessfn = access_pauth, 7090 .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) }, 7091 { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64, 7092 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0, 7093 .access = PL1_RW, .accessfn = access_pauth, 7094 .fieldoffset = offsetof(CPUARMState, keys.apga.lo) }, 7095 { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64, 7096 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1, 7097 .access = PL1_RW, .accessfn = access_pauth, 7098 .fieldoffset = offsetof(CPUARMState, keys.apga.hi) }, 7099 { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64, 7100 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0, 7101 .access = PL1_RW, .accessfn = access_pauth, 7102 .fieldoffset = offsetof(CPUARMState, keys.apia.lo) }, 7103 { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64, 7104 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1, 7105 .access = PL1_RW, .accessfn = access_pauth, 7106 .fieldoffset = offsetof(CPUARMState, keys.apia.hi) }, 7107 { .name = "APIBKEYLO_EL1", .state = 
ARM_CP_STATE_AA64, 7108 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2, 7109 .access = PL1_RW, .accessfn = access_pauth, 7110 .fieldoffset = offsetof(CPUARMState, keys.apib.lo) }, 7111 { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64, 7112 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3, 7113 .access = PL1_RW, .accessfn = access_pauth, 7114 .fieldoffset = offsetof(CPUARMState, keys.apib.hi) }, 7115 REGINFO_SENTINEL 7116 }; 7117 7118 static const ARMCPRegInfo tlbirange_reginfo[] = { 7119 { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64, 7120 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1, 7121 .access = PL1_W, .type = ARM_CP_NO_RAW, 7122 .writefn = tlbi_aa64_rvae1is_write }, 7123 { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64, 7124 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3, 7125 .access = PL1_W, .type = ARM_CP_NO_RAW, 7126 .writefn = tlbi_aa64_rvae1is_write }, 7127 { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64, 7128 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5, 7129 .access = PL1_W, .type = ARM_CP_NO_RAW, 7130 .writefn = tlbi_aa64_rvae1is_write }, 7131 { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64, 7132 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7, 7133 .access = PL1_W, .type = ARM_CP_NO_RAW, 7134 .writefn = tlbi_aa64_rvae1is_write }, 7135 { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64, 7136 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1, 7137 .access = PL1_W, .type = ARM_CP_NO_RAW, 7138 .writefn = tlbi_aa64_rvae1is_write }, 7139 { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64, 7140 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3, 7141 .access = PL1_W, .type = ARM_CP_NO_RAW, 7142 .writefn = tlbi_aa64_rvae1is_write }, 7143 { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64, 7144 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5, 7145 .access = PL1_W, .type = ARM_CP_NO_RAW, 7146 .writefn = tlbi_aa64_rvae1is_write }, 7147 { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64, 7148 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7, 7149 .access = PL1_W, .type = ARM_CP_NO_RAW, 7150 .writefn = tlbi_aa64_rvae1is_write }, 7151 { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64, 7152 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1, 7153 .access = PL1_W, .type = ARM_CP_NO_RAW, 7154 .writefn = tlbi_aa64_rvae1_write }, 7155 { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64, 7156 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3, 7157 .access = PL1_W, .type = ARM_CP_NO_RAW, 7158 .writefn = tlbi_aa64_rvae1_write }, 7159 { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64, 7160 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5, 7161 .access = PL1_W, .type = ARM_CP_NO_RAW, 7162 .writefn = tlbi_aa64_rvae1_write }, 7163 { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64, 7164 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7, 7165 .access = PL1_W, .type = ARM_CP_NO_RAW, 7166 .writefn = tlbi_aa64_rvae1_write }, 7167 { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64, 7168 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2, 7169 .access = PL2_W, .type = ARM_CP_NOP }, 7170 { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64, 7171 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6, 7172 .access = PL2_W, .type = ARM_CP_NOP }, 7173 { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64, 7174 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1, 7175 .access = PL2_W, .type = ARM_CP_NO_RAW, 7176 .writefn = tlbi_aa64_rvae2is_write }, 7177 { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64, 
7178 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5, 7179 .access = PL2_W, .type = ARM_CP_NO_RAW, 7180 .writefn = tlbi_aa64_rvae2is_write }, 7181 { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64, 7182 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2, 7183 .access = PL2_W, .type = ARM_CP_NOP }, 7184 { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64, 7185 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6, 7186 .access = PL2_W, .type = ARM_CP_NOP }, 7187 { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64, 7188 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1, 7189 .access = PL2_W, .type = ARM_CP_NO_RAW, 7190 .writefn = tlbi_aa64_rvae2is_write }, 7191 { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64, 7192 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5, 7193 .access = PL2_W, .type = ARM_CP_NO_RAW, 7194 .writefn = tlbi_aa64_rvae2is_write }, 7195 { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64, 7196 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1, 7197 .access = PL2_W, .type = ARM_CP_NO_RAW, 7198 .writefn = tlbi_aa64_rvae2_write }, 7199 { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64, 7200 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5, 7201 .access = PL2_W, .type = ARM_CP_NO_RAW, 7202 .writefn = tlbi_aa64_rvae2_write }, 7203 { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64, 7204 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1, 7205 .access = PL3_W, .type = ARM_CP_NO_RAW, 7206 .writefn = tlbi_aa64_rvae3is_write }, 7207 { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64, 7208 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5, 7209 .access = PL3_W, .type = ARM_CP_NO_RAW, 7210 .writefn = tlbi_aa64_rvae3is_write }, 7211 { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64, 7212 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1, 7213 .access = PL3_W, .type = ARM_CP_NO_RAW, 7214 .writefn = tlbi_aa64_rvae3is_write }, 7215 { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64, 7216 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5, 7217 .access = PL3_W, .type = ARM_CP_NO_RAW, 7218 .writefn = tlbi_aa64_rvae3is_write }, 7219 { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64, 7220 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1, 7221 .access = PL3_W, .type = ARM_CP_NO_RAW, 7222 .writefn = tlbi_aa64_rvae3_write }, 7223 { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64, 7224 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5, 7225 .access = PL3_W, .type = ARM_CP_NO_RAW, 7226 .writefn = tlbi_aa64_rvae3_write }, 7227 REGINFO_SENTINEL 7228 }; 7229 7230 static const ARMCPRegInfo tlbios_reginfo[] = { 7231 { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64, 7232 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0, 7233 .access = PL1_W, .type = ARM_CP_NO_RAW, 7234 .writefn = tlbi_aa64_vmalle1is_write }, 7235 { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64, 7236 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2, 7237 .access = PL1_W, .type = ARM_CP_NO_RAW, 7238 .writefn = tlbi_aa64_vmalle1is_write }, 7239 { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64, 7240 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0, 7241 .access = PL2_W, .type = ARM_CP_NO_RAW, 7242 .writefn = tlbi_aa64_alle2is_write }, 7243 { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64, 7244 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4, 7245 .access = PL2_W, .type = ARM_CP_NO_RAW, 7246 .writefn = tlbi_aa64_alle1is_write }, 7247 { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64, 7248 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6, 7249 
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    REGINFO_SENTINEL
};

static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    Error *err = NULL;
    uint64_t ret;

    /* Success sets NZCV = 0000. */
    env->NF = env->CF = env->VF = 0, env->ZF = 1;

    if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
        /*
         * ??? Failed, for unknown reasons in the crypto subsystem.
         * The best we can do is log the reason and return the
         * timed-out indication to the guest. There is no reason
         * we know to expect this failure to be transitory, so the
         * guest may well hang retrying the operation.
         */
        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
                      ri->name, error_get_pretty(err));
        error_free(err);

        env->ZF = 0; /* NZCV = 0100 */
        return 0;
    }
    return ret;
}

/* We do not support re-seeding, so the two registers operate the same.
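 * Both encodings are backed by rndr_readfn() above, which draws its data
 * from qemu_guest_getrandom().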
*/ 7297 static const ARMCPRegInfo rndr_reginfo[] = { 7298 { .name = "RNDR", .state = ARM_CP_STATE_AA64, 7299 .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO, 7300 .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0, 7301 .access = PL0_R, .readfn = rndr_readfn }, 7302 { .name = "RNDRRS", .state = ARM_CP_STATE_AA64, 7303 .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO, 7304 .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1, 7305 .access = PL0_R, .readfn = rndr_readfn }, 7306 REGINFO_SENTINEL 7307 }; 7308 7309 #ifndef CONFIG_USER_ONLY 7310 static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque, 7311 uint64_t value) 7312 { 7313 ARMCPU *cpu = env_archcpu(env); 7314 /* CTR_EL0 System register -> DminLine, bits [19:16] */ 7315 uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF); 7316 uint64_t vaddr_in = (uint64_t) value; 7317 uint64_t vaddr = vaddr_in & ~(dline_size - 1); 7318 void *haddr; 7319 int mem_idx = cpu_mmu_index(env, false); 7320 7321 /* This won't be crossing page boundaries */ 7322 haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC()); 7323 if (haddr) { 7324 7325 ram_addr_t offset; 7326 MemoryRegion *mr; 7327 7328 /* RCU lock is already being held */ 7329 mr = memory_region_from_host(haddr, &offset); 7330 7331 if (mr) { 7332 memory_region_writeback(mr, offset, dline_size); 7333 } 7334 } 7335 } 7336 7337 static const ARMCPRegInfo dcpop_reg[] = { 7338 { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64, 7339 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1, 7340 .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END, 7341 .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn }, 7342 REGINFO_SENTINEL 7343 }; 7344 7345 static const ARMCPRegInfo dcpodp_reg[] = { 7346 { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64, 7347 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1, 7348 .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END, 7349 .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn }, 7350 REGINFO_SENTINEL 7351 }; 7352 #endif /*CONFIG_USER_ONLY*/ 7353 7354 static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri, 7355 bool isread) 7356 { 7357 if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) { 7358 return CP_ACCESS_TRAP_EL2; 7359 } 7360 7361 return CP_ACCESS_OK; 7362 } 7363 7364 static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri, 7365 bool isread) 7366 { 7367 int el = arm_current_el(env); 7368 7369 if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) { 7370 uint64_t hcr = arm_hcr_el2_eff(env); 7371 if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) { 7372 return CP_ACCESS_TRAP_EL2; 7373 } 7374 } 7375 if (el < 3 && 7376 arm_feature(env, ARM_FEATURE_EL3) && 7377 !(env->cp15.scr_el3 & SCR_ATA)) { 7378 return CP_ACCESS_TRAP_EL3; 7379 } 7380 return CP_ACCESS_OK; 7381 } 7382 7383 static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri) 7384 { 7385 return env->pstate & PSTATE_TCO; 7386 } 7387 7388 static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val) 7389 { 7390 env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO); 7391 } 7392 7393 static const ARMCPRegInfo mte_reginfo[] = { 7394 { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64, 7395 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1, 7396 .access = PL1_RW, .accessfn = access_mte, 7397 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) }, 7398 { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64, 7399 .opc0 = 3, .opc1 = 0, .crn = 5, .crm 
= 6, .opc2 = 0, 7400 .access = PL1_RW, .accessfn = access_mte, 7401 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) }, 7402 { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64, 7403 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0, 7404 .access = PL2_RW, .accessfn = access_mte, 7405 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) }, 7406 { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64, 7407 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0, 7408 .access = PL3_RW, 7409 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) }, 7410 { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64, 7411 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5, 7412 .access = PL1_RW, .accessfn = access_mte, 7413 .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) }, 7414 { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64, 7415 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6, 7416 .access = PL1_RW, .accessfn = access_mte, 7417 .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) }, 7418 { .name = "GMID_EL1", .state = ARM_CP_STATE_AA64, 7419 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4, 7420 .access = PL1_R, .accessfn = access_aa64_tid5, 7421 .type = ARM_CP_CONST, .resetvalue = GMID_EL1_BS }, 7422 { .name = "TCO", .state = ARM_CP_STATE_AA64, 7423 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7, 7424 .type = ARM_CP_NO_RAW, 7425 .access = PL0_RW, .readfn = tco_read, .writefn = tco_write }, 7426 { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64, 7427 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3, 7428 .type = ARM_CP_NOP, .access = PL1_W, 7429 .accessfn = aa64_cacheop_poc_access }, 7430 { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64, 7431 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4, 7432 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 7433 { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64, 7434 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5, 7435 .type = ARM_CP_NOP, .access = PL1_W, 7436 .accessfn = aa64_cacheop_poc_access }, 7437 { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64, 7438 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6, 7439 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 7440 { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64, 7441 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4, 7442 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 7443 { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64, 7444 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6, 7445 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 7446 { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64, 7447 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4, 7448 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 7449 { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64, 7450 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6, 7451 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 7452 REGINFO_SENTINEL 7453 }; 7454 7455 static const ARMCPRegInfo mte_tco_ro_reginfo[] = { 7456 { .name = "TCO", .state = ARM_CP_STATE_AA64, 7457 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7, 7458 .type = ARM_CP_CONST, .access = PL0_RW, }, 7459 REGINFO_SENTINEL 7460 }; 7461 7462 static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = { 7463 { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64, 7464 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3, 7465 .type = ARM_CP_NOP, .access = PL0_W, 7466 .accessfn = aa64_cacheop_poc_access }, 7467 { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64, 7468 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5, 
7469 .type = ARM_CP_NOP, .access = PL0_W, 7470 .accessfn = aa64_cacheop_poc_access }, 7471 { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64, 7472 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3, 7473 .type = ARM_CP_NOP, .access = PL0_W, 7474 .accessfn = aa64_cacheop_poc_access }, 7475 { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64, 7476 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5, 7477 .type = ARM_CP_NOP, .access = PL0_W, 7478 .accessfn = aa64_cacheop_poc_access }, 7479 { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64, 7480 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3, 7481 .type = ARM_CP_NOP, .access = PL0_W, 7482 .accessfn = aa64_cacheop_poc_access }, 7483 { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64, 7484 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5, 7485 .type = ARM_CP_NOP, .access = PL0_W, 7486 .accessfn = aa64_cacheop_poc_access }, 7487 { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64, 7488 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3, 7489 .type = ARM_CP_NOP, .access = PL0_W, 7490 .accessfn = aa64_cacheop_poc_access }, 7491 { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64, 7492 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5, 7493 .type = ARM_CP_NOP, .access = PL0_W, 7494 .accessfn = aa64_cacheop_poc_access }, 7495 { .name = "DC_GVA", .state = ARM_CP_STATE_AA64, 7496 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3, 7497 .access = PL0_W, .type = ARM_CP_DC_GVA, 7498 #ifndef CONFIG_USER_ONLY 7499 /* Avoid overhead of an access check that always passes in user-mode */ 7500 .accessfn = aa64_zva_access, 7501 #endif 7502 }, 7503 { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64, 7504 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4, 7505 .access = PL0_W, .type = ARM_CP_DC_GZVA, 7506 #ifndef CONFIG_USER_ONLY 7507 /* Avoid overhead of an access check that always passes in user-mode */ 7508 .accessfn = aa64_zva_access, 7509 #endif 7510 }, 7511 REGINFO_SENTINEL 7512 }; 7513 7514 #endif 7515 7516 static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri, 7517 bool isread) 7518 { 7519 int el = arm_current_el(env); 7520 7521 if (el == 0) { 7522 uint64_t sctlr = arm_sctlr(env, el); 7523 if (!(sctlr & SCTLR_EnRCTX)) { 7524 return CP_ACCESS_TRAP; 7525 } 7526 } else if (el == 1) { 7527 uint64_t hcr = arm_hcr_el2_eff(env); 7528 if (hcr & HCR_NV) { 7529 return CP_ACCESS_TRAP_EL2; 7530 } 7531 } 7532 return CP_ACCESS_OK; 7533 } 7534 7535 static const ARMCPRegInfo predinv_reginfo[] = { 7536 { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64, 7537 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4, 7538 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 7539 { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64, 7540 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5, 7541 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 7542 { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64, 7543 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7, 7544 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 7545 /* 7546 * Note the AArch32 opcodes have a different OPC1. 
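 * (The AArch64 CFP/DVP/CPP RCTX encodings above use opc1 = 3; the AArch32
 * equivalents below use opc1 = 0.)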
7547 */ 7548 { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32, 7549 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4, 7550 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 7551 { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32, 7552 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5, 7553 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 7554 { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32, 7555 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7, 7556 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 7557 REGINFO_SENTINEL 7558 }; 7559 7560 static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri) 7561 { 7562 /* Read the high 32 bits of the current CCSIDR */ 7563 return extract64(ccsidr_read(env, ri), 32, 32); 7564 } 7565 7566 static const ARMCPRegInfo ccsidr2_reginfo[] = { 7567 { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH, 7568 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2, 7569 .access = PL1_R, 7570 .accessfn = access_aa64_tid2, 7571 .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW }, 7572 REGINFO_SENTINEL 7573 }; 7574 7575 static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri, 7576 bool isread) 7577 { 7578 if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) { 7579 return CP_ACCESS_TRAP_EL2; 7580 } 7581 7582 return CP_ACCESS_OK; 7583 } 7584 7585 static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri, 7586 bool isread) 7587 { 7588 if (arm_feature(env, ARM_FEATURE_V8)) { 7589 return access_aa64_tid3(env, ri, isread); 7590 } 7591 7592 return CP_ACCESS_OK; 7593 } 7594 7595 static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri, 7596 bool isread) 7597 { 7598 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) { 7599 return CP_ACCESS_TRAP_EL2; 7600 } 7601 7602 return CP_ACCESS_OK; 7603 } 7604 7605 static CPAccessResult access_joscr_jmcr(CPUARMState *env, 7606 const ARMCPRegInfo *ri, bool isread) 7607 { 7608 /* 7609 * HSTR.TJDBX traps JOSCR and JMCR accesses, but it exists only 7610 * in v7A, not in v8A. 
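 * We therefore only check HSTR_EL2.TJDBX when the CPU is not v8 and we are
 * executing in Non-secure EL0 or EL1.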
7611 */ 7612 if (!arm_feature(env, ARM_FEATURE_V8) && 7613 arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) && 7614 (env->cp15.hstr_el2 & HSTR_TJDBX)) { 7615 return CP_ACCESS_TRAP_EL2; 7616 } 7617 return CP_ACCESS_OK; 7618 } 7619 7620 static const ARMCPRegInfo jazelle_regs[] = { 7621 { .name = "JIDR", 7622 .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0, 7623 .access = PL1_R, .accessfn = access_jazelle, 7624 .type = ARM_CP_CONST, .resetvalue = 0 }, 7625 { .name = "JOSCR", 7626 .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0, 7627 .accessfn = access_joscr_jmcr, 7628 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 7629 { .name = "JMCR", 7630 .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0, 7631 .accessfn = access_joscr_jmcr, 7632 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 7633 REGINFO_SENTINEL 7634 }; 7635 7636 static const ARMCPRegInfo vhe_reginfo[] = { 7637 { .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64, 7638 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1, 7639 .access = PL2_RW, 7640 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) }, 7641 { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64, 7642 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1, 7643 .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write, 7644 .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) }, 7645 #ifndef CONFIG_USER_ONLY 7646 { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64, 7647 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2, 7648 .fieldoffset = 7649 offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval), 7650 .type = ARM_CP_IO, .access = PL2_RW, 7651 .writefn = gt_hv_cval_write, .raw_writefn = raw_write }, 7652 { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 7653 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0, 7654 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, 7655 .resetfn = gt_hv_timer_reset, 7656 .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write }, 7657 { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH, 7658 .type = ARM_CP_IO, 7659 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1, 7660 .access = PL2_RW, 7661 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl), 7662 .writefn = gt_hv_ctl_write, .raw_writefn = raw_write }, 7663 { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64, 7664 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1, 7665 .type = ARM_CP_IO | ARM_CP_ALIAS, 7666 .access = PL2_RW, .accessfn = e2h_access, 7667 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), 7668 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write }, 7669 { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64, 7670 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1, 7671 .type = ARM_CP_IO | ARM_CP_ALIAS, 7672 .access = PL2_RW, .accessfn = e2h_access, 7673 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), 7674 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write }, 7675 { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64, 7676 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0, 7677 .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS, 7678 .access = PL2_RW, .accessfn = e2h_access, 7679 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write }, 7680 { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64, 7681 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0, 7682 .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS, 7683 .access = PL2_RW, .accessfn = e2h_access, 7684 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write }, 7685 { 
.name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64, 7686 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2, 7687 .type = ARM_CP_IO | ARM_CP_ALIAS, 7688 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 7689 .access = PL2_RW, .accessfn = e2h_access, 7690 .writefn = gt_phys_cval_write, .raw_writefn = raw_write }, 7691 { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64, 7692 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2, 7693 .type = ARM_CP_IO | ARM_CP_ALIAS, 7694 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 7695 .access = PL2_RW, .accessfn = e2h_access, 7696 .writefn = gt_virt_cval_write, .raw_writefn = raw_write }, 7697 #endif 7698 REGINFO_SENTINEL 7699 }; 7700 7701 #ifndef CONFIG_USER_ONLY 7702 static const ARMCPRegInfo ats1e1_reginfo[] = { 7703 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, 7704 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0, 7705 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 7706 .writefn = ats_write64 }, 7707 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, 7708 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, 7709 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 7710 .writefn = ats_write64 }, 7711 REGINFO_SENTINEL 7712 }; 7713 7714 static const ARMCPRegInfo ats1cp_reginfo[] = { 7715 { .name = "ATS1CPRP", 7716 .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0, 7717 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 7718 .writefn = ats_write }, 7719 { .name = "ATS1CPWP", 7720 .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, 7721 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 7722 .writefn = ats_write }, 7723 REGINFO_SENTINEL 7724 }; 7725 #endif 7726 7727 /* 7728 * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and 7729 * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field 7730 * is non-zero, which is never for ARMv7, optionally in ARMv8 7731 * and mandatorily for ARMv8.2 and up. 7732 * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's 7733 * implementation is RAZ/WI we can ignore this detail, as we 7734 * do for ACTLR. 7735 */ 7736 static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = { 7737 { .name = "ACTLR2", .state = ARM_CP_STATE_AA32, 7738 .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3, 7739 .access = PL1_RW, .accessfn = access_tacr, 7740 .type = ARM_CP_CONST, .resetvalue = 0 }, 7741 { .name = "HACTLR2", .state = ARM_CP_STATE_AA32, 7742 .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3, 7743 .access = PL2_RW, .type = ARM_CP_CONST, 7744 .resetvalue = 0 }, 7745 REGINFO_SENTINEL 7746 }; 7747 7748 void register_cp_regs_for_features(ARMCPU *cpu) 7749 { 7750 /* Register all the coprocessor registers based on feature bits */ 7751 CPUARMState *env = &cpu->env; 7752 if (arm_feature(env, ARM_FEATURE_M)) { 7753 /* M profile has no coprocessor registers */ 7754 return; 7755 } 7756 7757 define_arm_cp_regs(cpu, cp_reginfo); 7758 if (!arm_feature(env, ARM_FEATURE_V8)) { 7759 /* Must go early as it is full of wildcards that may be 7760 * overridden by later definitions. 
7761 */ 7762 define_arm_cp_regs(cpu, not_v8_cp_reginfo); 7763 } 7764 7765 if (arm_feature(env, ARM_FEATURE_V6)) { 7766 /* The ID registers all have impdef reset values */ 7767 ARMCPRegInfo v6_idregs[] = { 7768 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH, 7769 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 7770 .access = PL1_R, .type = ARM_CP_CONST, 7771 .accessfn = access_aa32_tid3, 7772 .resetvalue = cpu->isar.id_pfr0 }, 7773 /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know 7774 * the value of the GIC field until after we define these regs. 7775 */ 7776 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH, 7777 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1, 7778 .access = PL1_R, .type = ARM_CP_NO_RAW, 7779 .accessfn = access_aa32_tid3, 7780 .readfn = id_pfr1_read, 7781 .writefn = arm_cp_write_ignore }, 7782 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH, 7783 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2, 7784 .access = PL1_R, .type = ARM_CP_CONST, 7785 .accessfn = access_aa32_tid3, 7786 .resetvalue = cpu->isar.id_dfr0 }, 7787 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH, 7788 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3, 7789 .access = PL1_R, .type = ARM_CP_CONST, 7790 .accessfn = access_aa32_tid3, 7791 .resetvalue = cpu->id_afr0 }, 7792 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH, 7793 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4, 7794 .access = PL1_R, .type = ARM_CP_CONST, 7795 .accessfn = access_aa32_tid3, 7796 .resetvalue = cpu->isar.id_mmfr0 }, 7797 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH, 7798 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5, 7799 .access = PL1_R, .type = ARM_CP_CONST, 7800 .accessfn = access_aa32_tid3, 7801 .resetvalue = cpu->isar.id_mmfr1 }, 7802 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH, 7803 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6, 7804 .access = PL1_R, .type = ARM_CP_CONST, 7805 .accessfn = access_aa32_tid3, 7806 .resetvalue = cpu->isar.id_mmfr2 }, 7807 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH, 7808 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7, 7809 .access = PL1_R, .type = ARM_CP_CONST, 7810 .accessfn = access_aa32_tid3, 7811 .resetvalue = cpu->isar.id_mmfr3 }, 7812 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH, 7813 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 7814 .access = PL1_R, .type = ARM_CP_CONST, 7815 .accessfn = access_aa32_tid3, 7816 .resetvalue = cpu->isar.id_isar0 }, 7817 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH, 7818 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1, 7819 .access = PL1_R, .type = ARM_CP_CONST, 7820 .accessfn = access_aa32_tid3, 7821 .resetvalue = cpu->isar.id_isar1 }, 7822 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH, 7823 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 7824 .access = PL1_R, .type = ARM_CP_CONST, 7825 .accessfn = access_aa32_tid3, 7826 .resetvalue = cpu->isar.id_isar2 }, 7827 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH, 7828 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3, 7829 .access = PL1_R, .type = ARM_CP_CONST, 7830 .accessfn = access_aa32_tid3, 7831 .resetvalue = cpu->isar.id_isar3 }, 7832 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH, 7833 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4, 7834 .access = PL1_R, .type = ARM_CP_CONST, 7835 .accessfn = access_aa32_tid3, 7836 .resetvalue = cpu->isar.id_isar4 }, 7837 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH, 7838 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5, 7839 .access = PL1_R, .type = ARM_CP_CONST, 7840 
.accessfn = access_aa32_tid3, 7841 .resetvalue = cpu->isar.id_isar5 }, 7842 { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH, 7843 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6, 7844 .access = PL1_R, .type = ARM_CP_CONST, 7845 .accessfn = access_aa32_tid3, 7846 .resetvalue = cpu->isar.id_mmfr4 }, 7847 { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH, 7848 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7, 7849 .access = PL1_R, .type = ARM_CP_CONST, 7850 .accessfn = access_aa32_tid3, 7851 .resetvalue = cpu->isar.id_isar6 }, 7852 REGINFO_SENTINEL 7853 }; 7854 define_arm_cp_regs(cpu, v6_idregs); 7855 define_arm_cp_regs(cpu, v6_cp_reginfo); 7856 } else { 7857 define_arm_cp_regs(cpu, not_v6_cp_reginfo); 7858 } 7859 if (arm_feature(env, ARM_FEATURE_V6K)) { 7860 define_arm_cp_regs(cpu, v6k_cp_reginfo); 7861 } 7862 if (arm_feature(env, ARM_FEATURE_V7MP) && 7863 !arm_feature(env, ARM_FEATURE_PMSA)) { 7864 define_arm_cp_regs(cpu, v7mp_cp_reginfo); 7865 } 7866 if (arm_feature(env, ARM_FEATURE_V7VE)) { 7867 define_arm_cp_regs(cpu, pmovsset_cp_reginfo); 7868 } 7869 if (arm_feature(env, ARM_FEATURE_V7)) { 7870 ARMCPRegInfo clidr = { 7871 .name = "CLIDR", .state = ARM_CP_STATE_BOTH, 7872 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1, 7873 .access = PL1_R, .type = ARM_CP_CONST, 7874 .accessfn = access_aa64_tid2, 7875 .resetvalue = cpu->clidr 7876 }; 7877 define_one_arm_cp_reg(cpu, &clidr); 7878 define_arm_cp_regs(cpu, v7_cp_reginfo); 7879 define_debug_regs(cpu); 7880 define_pmu_regs(cpu); 7881 } else { 7882 define_arm_cp_regs(cpu, not_v7_cp_reginfo); 7883 } 7884 if (arm_feature(env, ARM_FEATURE_V8)) { 7885 /* AArch64 ID registers, which all have impdef reset values. 7886 * Note that within the ID register ranges the unused slots 7887 * must all RAZ, not UNDEF; future architecture versions may 7888 * define new registers here. 7889 */ 7890 ARMCPRegInfo v8_idregs[] = { 7891 /* 7892 * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system 7893 * emulation because we don't know the right value for the 7894 * GIC field until after we define these regs. 
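             * Instead, id_aa64pfr0_read() fills in the GIC field at read
             * time by checking env->gicv3state.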
7895 */ 7896 { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64, 7897 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0, 7898 .access = PL1_R, 7899 #ifdef CONFIG_USER_ONLY 7900 .type = ARM_CP_CONST, 7901 .resetvalue = cpu->isar.id_aa64pfr0 7902 #else 7903 .type = ARM_CP_NO_RAW, 7904 .accessfn = access_aa64_tid3, 7905 .readfn = id_aa64pfr0_read, 7906 .writefn = arm_cp_write_ignore 7907 #endif 7908 }, 7909 { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64, 7910 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1, 7911 .access = PL1_R, .type = ARM_CP_CONST, 7912 .accessfn = access_aa64_tid3, 7913 .resetvalue = cpu->isar.id_aa64pfr1}, 7914 { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7915 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2, 7916 .access = PL1_R, .type = ARM_CP_CONST, 7917 .accessfn = access_aa64_tid3, 7918 .resetvalue = 0 }, 7919 { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7920 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3, 7921 .access = PL1_R, .type = ARM_CP_CONST, 7922 .accessfn = access_aa64_tid3, 7923 .resetvalue = 0 }, 7924 { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64, 7925 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4, 7926 .access = PL1_R, .type = ARM_CP_CONST, 7927 .accessfn = access_aa64_tid3, 7928 .resetvalue = cpu->isar.id_aa64zfr0 }, 7929 { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7930 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5, 7931 .access = PL1_R, .type = ARM_CP_CONST, 7932 .accessfn = access_aa64_tid3, 7933 .resetvalue = 0 }, 7934 { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7935 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6, 7936 .access = PL1_R, .type = ARM_CP_CONST, 7937 .accessfn = access_aa64_tid3, 7938 .resetvalue = 0 }, 7939 { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7940 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7, 7941 .access = PL1_R, .type = ARM_CP_CONST, 7942 .accessfn = access_aa64_tid3, 7943 .resetvalue = 0 }, 7944 { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64, 7945 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0, 7946 .access = PL1_R, .type = ARM_CP_CONST, 7947 .accessfn = access_aa64_tid3, 7948 .resetvalue = cpu->isar.id_aa64dfr0 }, 7949 { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64, 7950 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1, 7951 .access = PL1_R, .type = ARM_CP_CONST, 7952 .accessfn = access_aa64_tid3, 7953 .resetvalue = cpu->isar.id_aa64dfr1 }, 7954 { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7955 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2, 7956 .access = PL1_R, .type = ARM_CP_CONST, 7957 .accessfn = access_aa64_tid3, 7958 .resetvalue = 0 }, 7959 { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7960 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3, 7961 .access = PL1_R, .type = ARM_CP_CONST, 7962 .accessfn = access_aa64_tid3, 7963 .resetvalue = 0 }, 7964 { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64, 7965 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4, 7966 .access = PL1_R, .type = ARM_CP_CONST, 7967 .accessfn = access_aa64_tid3, 7968 .resetvalue = cpu->id_aa64afr0 }, 7969 { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64, 7970 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5, 7971 .access = PL1_R, .type = ARM_CP_CONST, 7972 .accessfn = access_aa64_tid3, 7973 .resetvalue = cpu->id_aa64afr1 }, 7974 { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 
7975 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6, 7976 .access = PL1_R, .type = ARM_CP_CONST, 7977 .accessfn = access_aa64_tid3, 7978 .resetvalue = 0 }, 7979 { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7980 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7, 7981 .access = PL1_R, .type = ARM_CP_CONST, 7982 .accessfn = access_aa64_tid3, 7983 .resetvalue = 0 }, 7984 { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64, 7985 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0, 7986 .access = PL1_R, .type = ARM_CP_CONST, 7987 .accessfn = access_aa64_tid3, 7988 .resetvalue = cpu->isar.id_aa64isar0 }, 7989 { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64, 7990 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1, 7991 .access = PL1_R, .type = ARM_CP_CONST, 7992 .accessfn = access_aa64_tid3, 7993 .resetvalue = cpu->isar.id_aa64isar1 }, 7994 { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7995 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2, 7996 .access = PL1_R, .type = ARM_CP_CONST, 7997 .accessfn = access_aa64_tid3, 7998 .resetvalue = 0 }, 7999 { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 8000 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3, 8001 .access = PL1_R, .type = ARM_CP_CONST, 8002 .accessfn = access_aa64_tid3, 8003 .resetvalue = 0 }, 8004 { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 8005 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4, 8006 .access = PL1_R, .type = ARM_CP_CONST, 8007 .accessfn = access_aa64_tid3, 8008 .resetvalue = 0 }, 8009 { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 8010 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5, 8011 .access = PL1_R, .type = ARM_CP_CONST, 8012 .accessfn = access_aa64_tid3, 8013 .resetvalue = 0 }, 8014 { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 8015 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6, 8016 .access = PL1_R, .type = ARM_CP_CONST, 8017 .accessfn = access_aa64_tid3, 8018 .resetvalue = 0 }, 8019 { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 8020 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7, 8021 .access = PL1_R, .type = ARM_CP_CONST, 8022 .accessfn = access_aa64_tid3, 8023 .resetvalue = 0 }, 8024 { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64, 8025 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 8026 .access = PL1_R, .type = ARM_CP_CONST, 8027 .accessfn = access_aa64_tid3, 8028 .resetvalue = cpu->isar.id_aa64mmfr0 }, 8029 { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64, 8030 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1, 8031 .access = PL1_R, .type = ARM_CP_CONST, 8032 .accessfn = access_aa64_tid3, 8033 .resetvalue = cpu->isar.id_aa64mmfr1 }, 8034 { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64, 8035 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2, 8036 .access = PL1_R, .type = ARM_CP_CONST, 8037 .accessfn = access_aa64_tid3, 8038 .resetvalue = cpu->isar.id_aa64mmfr2 }, 8039 { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 8040 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3, 8041 .access = PL1_R, .type = ARM_CP_CONST, 8042 .accessfn = access_aa64_tid3, 8043 .resetvalue = 0 }, 8044 { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 8045 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4, 8046 .access = PL1_R, .type = ARM_CP_CONST, 8047 .accessfn = access_aa64_tid3, 8048 .resetvalue = 0 }, 8049 { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 
8050 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5, 8051 .access = PL1_R, .type = ARM_CP_CONST, 8052 .accessfn = access_aa64_tid3, 8053 .resetvalue = 0 }, 8054 { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 8055 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6, 8056 .access = PL1_R, .type = ARM_CP_CONST, 8057 .accessfn = access_aa64_tid3, 8058 .resetvalue = 0 }, 8059 { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 8060 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7, 8061 .access = PL1_R, .type = ARM_CP_CONST, 8062 .accessfn = access_aa64_tid3, 8063 .resetvalue = 0 }, 8064 { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64, 8065 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0, 8066 .access = PL1_R, .type = ARM_CP_CONST, 8067 .accessfn = access_aa64_tid3, 8068 .resetvalue = cpu->isar.mvfr0 }, 8069 { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64, 8070 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1, 8071 .access = PL1_R, .type = ARM_CP_CONST, 8072 .accessfn = access_aa64_tid3, 8073 .resetvalue = cpu->isar.mvfr1 }, 8074 { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64, 8075 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, 8076 .access = PL1_R, .type = ARM_CP_CONST, 8077 .accessfn = access_aa64_tid3, 8078 .resetvalue = cpu->isar.mvfr2 }, 8079 { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 8080 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3, 8081 .access = PL1_R, .type = ARM_CP_CONST, 8082 .accessfn = access_aa64_tid3, 8083 .resetvalue = 0 }, 8084 { .name = "ID_PFR2", .state = ARM_CP_STATE_BOTH, 8085 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4, 8086 .access = PL1_R, .type = ARM_CP_CONST, 8087 .accessfn = access_aa64_tid3, 8088 .resetvalue = cpu->isar.id_pfr2 }, 8089 { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 8090 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5, 8091 .access = PL1_R, .type = ARM_CP_CONST, 8092 .accessfn = access_aa64_tid3, 8093 .resetvalue = 0 }, 8094 { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 8095 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6, 8096 .access = PL1_R, .type = ARM_CP_CONST, 8097 .accessfn = access_aa64_tid3, 8098 .resetvalue = 0 }, 8099 { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 8100 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7, 8101 .access = PL1_R, .type = ARM_CP_CONST, 8102 .accessfn = access_aa64_tid3, 8103 .resetvalue = 0 }, 8104 { .name = "PMCEID0", .state = ARM_CP_STATE_AA32, 8105 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6, 8106 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 8107 .resetvalue = extract64(cpu->pmceid0, 0, 32) }, 8108 { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64, 8109 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6, 8110 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 8111 .resetvalue = cpu->pmceid0 }, 8112 { .name = "PMCEID1", .state = ARM_CP_STATE_AA32, 8113 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7, 8114 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 8115 .resetvalue = extract64(cpu->pmceid1, 0, 32) }, 8116 { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64, 8117 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7, 8118 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 8119 .resetvalue = cpu->pmceid1 }, 8120 REGINFO_SENTINEL 8121 }; 8122 #ifdef CONFIG_USER_ONLY 8123 ARMCPRegUserSpaceInfo v8_user_idregs[] = { 8124 { .name = "ID_AA64PFR0_EL1", 8125 .exported_bits = 0x000f000f00ff0000, 
8126 .fixed_bits = 0x0000000000000011 }, 8127 { .name = "ID_AA64PFR1_EL1", 8128 .exported_bits = 0x00000000000000f0 }, 8129 { .name = "ID_AA64PFR*_EL1_RESERVED", 8130 .is_glob = true }, 8131 { .name = "ID_AA64ZFR0_EL1" }, 8132 { .name = "ID_AA64MMFR0_EL1", 8133 .fixed_bits = 0x00000000ff000000 }, 8134 { .name = "ID_AA64MMFR1_EL1" }, 8135 { .name = "ID_AA64MMFR*_EL1_RESERVED", 8136 .is_glob = true }, 8137 { .name = "ID_AA64DFR0_EL1", 8138 .fixed_bits = 0x0000000000000006 }, 8139 { .name = "ID_AA64DFR1_EL1" }, 8140 { .name = "ID_AA64DFR*_EL1_RESERVED", 8141 .is_glob = true }, 8142 { .name = "ID_AA64AFR*", 8143 .is_glob = true }, 8144 { .name = "ID_AA64ISAR0_EL1", 8145 .exported_bits = 0x00fffffff0fffff0 }, 8146 { .name = "ID_AA64ISAR1_EL1", 8147 .exported_bits = 0x000000f0ffffffff }, 8148 { .name = "ID_AA64ISAR*_EL1_RESERVED", 8149 .is_glob = true }, 8150 REGUSERINFO_SENTINEL 8151 }; 8152 modify_arm_cp_regs(v8_idregs, v8_user_idregs); 8153 #endif 8154 /* RVBAR_EL1 is only implemented if EL1 is the highest EL */ 8155 if (!arm_feature(env, ARM_FEATURE_EL3) && 8156 !arm_feature(env, ARM_FEATURE_EL2)) { 8157 ARMCPRegInfo rvbar = { 8158 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64, 8159 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 8160 .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar 8161 }; 8162 define_one_arm_cp_reg(cpu, &rvbar); 8163 } 8164 define_arm_cp_regs(cpu, v8_idregs); 8165 define_arm_cp_regs(cpu, v8_cp_reginfo); 8166 } 8167 if (arm_feature(env, ARM_FEATURE_EL2)) { 8168 uint64_t vmpidr_def = mpidr_read_val(env); 8169 ARMCPRegInfo vpidr_regs[] = { 8170 { .name = "VPIDR", .state = ARM_CP_STATE_AA32, 8171 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 8172 .access = PL2_RW, .accessfn = access_el3_aa32ns, 8173 .resetvalue = cpu->midr, .type = ARM_CP_ALIAS, 8174 .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) }, 8175 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64, 8176 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 8177 .access = PL2_RW, .resetvalue = cpu->midr, 8178 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, 8179 { .name = "VMPIDR", .state = ARM_CP_STATE_AA32, 8180 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 8181 .access = PL2_RW, .accessfn = access_el3_aa32ns, 8182 .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS, 8183 .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) }, 8184 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64, 8185 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 8186 .access = PL2_RW, 8187 .resetvalue = vmpidr_def, 8188 .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) }, 8189 REGINFO_SENTINEL 8190 }; 8191 define_arm_cp_regs(cpu, vpidr_regs); 8192 define_arm_cp_regs(cpu, el2_cp_reginfo); 8193 if (arm_feature(env, ARM_FEATURE_V8)) { 8194 define_arm_cp_regs(cpu, el2_v8_cp_reginfo); 8195 } 8196 if (cpu_isar_feature(aa64_sel2, cpu)) { 8197 define_arm_cp_regs(cpu, el2_sec_cp_reginfo); 8198 } 8199 /* RVBAR_EL2 is only implemented if EL2 is the highest EL */ 8200 if (!arm_feature(env, ARM_FEATURE_EL3)) { 8201 ARMCPRegInfo rvbar = { 8202 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64, 8203 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1, 8204 .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar 8205 }; 8206 define_one_arm_cp_reg(cpu, &rvbar); 8207 } 8208 } else { 8209 /* If EL2 is missing but higher ELs are enabled, we need to 8210 * register the no_el2 reginfos. 
8211 */ 8212 if (arm_feature(env, ARM_FEATURE_EL3)) { 8213 /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value 8214 * of MIDR_EL1 and MPIDR_EL1. 8215 */ 8216 ARMCPRegInfo vpidr_regs[] = { 8217 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH, 8218 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 8219 .access = PL2_RW, .accessfn = access_el3_aa32ns, 8220 .type = ARM_CP_CONST, .resetvalue = cpu->midr, 8221 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, 8222 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH, 8223 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 8224 .access = PL2_RW, .accessfn = access_el3_aa32ns, 8225 .type = ARM_CP_NO_RAW, 8226 .writefn = arm_cp_write_ignore, .readfn = mpidr_read }, 8227 REGINFO_SENTINEL 8228 }; 8229 define_arm_cp_regs(cpu, vpidr_regs); 8230 define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo); 8231 if (arm_feature(env, ARM_FEATURE_V8)) { 8232 define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo); 8233 } 8234 } 8235 } 8236 if (arm_feature(env, ARM_FEATURE_EL3)) { 8237 define_arm_cp_regs(cpu, el3_cp_reginfo); 8238 ARMCPRegInfo el3_regs[] = { 8239 { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64, 8240 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1, 8241 .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar }, 8242 { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64, 8243 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0, 8244 .access = PL3_RW, 8245 .raw_writefn = raw_write, .writefn = sctlr_write, 8246 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]), 8247 .resetvalue = cpu->reset_sctlr }, 8248 REGINFO_SENTINEL 8249 }; 8250 8251 define_arm_cp_regs(cpu, el3_regs); 8252 } 8253 /* The behaviour of NSACR is sufficiently various that we don't 8254 * try to describe it in a single reginfo: 8255 * if EL3 is 64 bit, then trap to EL3 from S EL1, 8256 * reads as constant 0xc00 from NS EL1 and NS EL2 8257 * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2 8258 * if v7 without EL3, register doesn't exist 8259 * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2 8260 */ 8261 if (arm_feature(env, ARM_FEATURE_EL3)) { 8262 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 8263 ARMCPRegInfo nsacr = { 8264 .name = "NSACR", .type = ARM_CP_CONST, 8265 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 8266 .access = PL1_RW, .accessfn = nsacr_access, 8267 .resetvalue = 0xc00 8268 }; 8269 define_one_arm_cp_reg(cpu, &nsacr); 8270 } else { 8271 ARMCPRegInfo nsacr = { 8272 .name = "NSACR", 8273 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 8274 .access = PL3_RW | PL1_R, 8275 .resetvalue = 0, 8276 .fieldoffset = offsetof(CPUARMState, cp15.nsacr) 8277 }; 8278 define_one_arm_cp_reg(cpu, &nsacr); 8279 } 8280 } else { 8281 if (arm_feature(env, ARM_FEATURE_V8)) { 8282 ARMCPRegInfo nsacr = { 8283 .name = "NSACR", .type = ARM_CP_CONST, 8284 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 8285 .access = PL1_R, 8286 .resetvalue = 0xc00 8287 }; 8288 define_one_arm_cp_reg(cpu, &nsacr); 8289 } 8290 } 8291 8292 if (arm_feature(env, ARM_FEATURE_PMSA)) { 8293 if (arm_feature(env, ARM_FEATURE_V6)) { 8294 /* PMSAv6 not implemented */ 8295 assert(arm_feature(env, ARM_FEATURE_V7)); 8296 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); 8297 define_arm_cp_regs(cpu, pmsav7_cp_reginfo); 8298 } else { 8299 define_arm_cp_regs(cpu, pmsav5_cp_reginfo); 8300 } 8301 } else { 8302 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); 8303 define_arm_cp_regs(cpu, vmsa_cp_reginfo); 8304 /* TTBCR2 is introduced with ARMv8.2-AA32HPD. 
*/ 8305 if (cpu_isar_feature(aa32_hpd, cpu)) { 8306 define_one_arm_cp_reg(cpu, &ttbcr2_reginfo); 8307 } 8308 } 8309 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) { 8310 define_arm_cp_regs(cpu, t2ee_cp_reginfo); 8311 } 8312 if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { 8313 define_arm_cp_regs(cpu, generic_timer_cp_reginfo); 8314 } 8315 if (arm_feature(env, ARM_FEATURE_VAPA)) { 8316 define_arm_cp_regs(cpu, vapa_cp_reginfo); 8317 } 8318 if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) { 8319 define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo); 8320 } 8321 if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) { 8322 define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo); 8323 } 8324 if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) { 8325 define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo); 8326 } 8327 if (arm_feature(env, ARM_FEATURE_OMAPCP)) { 8328 define_arm_cp_regs(cpu, omap_cp_reginfo); 8329 } 8330 if (arm_feature(env, ARM_FEATURE_STRONGARM)) { 8331 define_arm_cp_regs(cpu, strongarm_cp_reginfo); 8332 } 8333 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 8334 define_arm_cp_regs(cpu, xscale_cp_reginfo); 8335 } 8336 if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) { 8337 define_arm_cp_regs(cpu, dummy_c15_cp_reginfo); 8338 } 8339 if (arm_feature(env, ARM_FEATURE_LPAE)) { 8340 define_arm_cp_regs(cpu, lpae_cp_reginfo); 8341 } 8342 if (cpu_isar_feature(aa32_jazelle, cpu)) { 8343 define_arm_cp_regs(cpu, jazelle_regs); 8344 } 8345 /* Slightly awkwardly, the OMAP and StrongARM cores need all of 8346 * cp15 crn=0 to be writes-ignored, whereas for other cores they should 8347 * be read-only (ie write causes UNDEF exception). 8348 */ 8349 { 8350 ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = { 8351 /* Pre-v8 MIDR space. 8352 * Note that the MIDR isn't a simple constant register because 8353 * of the TI925 behaviour where writes to another register can 8354 * cause the MIDR value to change. 8355 * 8356 * Unimplemented registers in the c15 0 0 0 space default to 8357 * MIDR. Define MIDR first as this entire space, then CTR, TCMTR 8358 * and friends override accordingly. 8359 */ 8360 { .name = "MIDR", 8361 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY, 8362 .access = PL1_R, .resetvalue = cpu->midr, 8363 .writefn = arm_cp_write_ignore, .raw_writefn = raw_write, 8364 .readfn = midr_read, 8365 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 8366 .type = ARM_CP_OVERRIDE }, 8367 /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. 
*/ 8368 { .name = "DUMMY", 8369 .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY, 8370 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 8371 { .name = "DUMMY", 8372 .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY, 8373 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 8374 { .name = "DUMMY", 8375 .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY, 8376 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 8377 { .name = "DUMMY", 8378 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY, 8379 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 8380 { .name = "DUMMY", 8381 .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY, 8382 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 8383 REGINFO_SENTINEL 8384 }; 8385 ARMCPRegInfo id_v8_midr_cp_reginfo[] = { 8386 { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH, 8387 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0, 8388 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr, 8389 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 8390 .readfn = midr_read }, 8391 /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */ 8392 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 8393 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 8394 .access = PL1_R, .resetvalue = cpu->midr }, 8395 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 8396 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7, 8397 .access = PL1_R, .resetvalue = cpu->midr }, 8398 { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH, 8399 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6, 8400 .access = PL1_R, 8401 .accessfn = access_aa64_tid1, 8402 .type = ARM_CP_CONST, .resetvalue = cpu->revidr }, 8403 REGINFO_SENTINEL 8404 }; 8405 ARMCPRegInfo id_cp_reginfo[] = { 8406 /* These are common to v8 and pre-v8 */ 8407 { .name = "CTR", 8408 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1, 8409 .access = PL1_R, .accessfn = ctr_el0_access, 8410 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 8411 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64, 8412 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0, 8413 .access = PL0_R, .accessfn = ctr_el0_access, 8414 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 8415 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */ 8416 { .name = "TCMTR", 8417 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2, 8418 .access = PL1_R, 8419 .accessfn = access_aa32_tid1, 8420 .type = ARM_CP_CONST, .resetvalue = 0 }, 8421 REGINFO_SENTINEL 8422 }; 8423 /* TLBTR is specific to VMSA */ 8424 ARMCPRegInfo id_tlbtr_reginfo = { 8425 .name = "TLBTR", 8426 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3, 8427 .access = PL1_R, 8428 .accessfn = access_aa32_tid1, 8429 .type = ARM_CP_CONST, .resetvalue = 0, 8430 }; 8431 /* MPUIR is specific to PMSA V6+ */ 8432 ARMCPRegInfo id_mpuir_reginfo = { 8433 .name = "MPUIR", 8434 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 8435 .access = PL1_R, .type = ARM_CP_CONST, 8436 .resetvalue = cpu->pmsav7_dregion << 8 8437 }; 8438 ARMCPRegInfo crn0_wi_reginfo = { 8439 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY, 8440 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W, 8441 .type = ARM_CP_NOP | ARM_CP_OVERRIDE 8442 }; 8443 #ifdef CONFIG_USER_ONLY 8444 ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = { 8445 { .name = "MIDR_EL1", 8446 .exported_bits = 0x00000000ffffffff }, 8447 { .name = "REVIDR_EL1" }, 8448 REGUSERINFO_SENTINEL 8449 }; 8450 modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo); 8451 #endif 8452 
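 /* For user-only emulation the modify_arm_cp_regs() call above turns MIDR_EL1 and REVIDR_EL1 into constant, EL0-readable registers: MIDR_EL1 keeps its exported low 32 bits and REVIDR_EL1 reads as zero. */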
if (arm_feature(env, ARM_FEATURE_OMAPCP) || 8453 arm_feature(env, ARM_FEATURE_STRONGARM)) { 8454 ARMCPRegInfo *r; 8455 /* Register the blanket "writes ignored" value first to cover the 8456 * whole space. Then update the specific ID registers to allow write 8457 * access, so that they ignore writes rather than causing them to 8458 * UNDEF. 8459 */ 8460 define_one_arm_cp_reg(cpu, &crn0_wi_reginfo); 8461 for (r = id_pre_v8_midr_cp_reginfo; 8462 r->type != ARM_CP_SENTINEL; r++) { 8463 r->access = PL1_RW; 8464 } 8465 for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) { 8466 r->access = PL1_RW; 8467 } 8468 id_mpuir_reginfo.access = PL1_RW; 8469 id_tlbtr_reginfo.access = PL1_RW; 8470 } 8471 if (arm_feature(env, ARM_FEATURE_V8)) { 8472 define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo); 8473 } else { 8474 define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo); 8475 } 8476 define_arm_cp_regs(cpu, id_cp_reginfo); 8477 if (!arm_feature(env, ARM_FEATURE_PMSA)) { 8478 define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo); 8479 } else if (arm_feature(env, ARM_FEATURE_V7)) { 8480 define_one_arm_cp_reg(cpu, &id_mpuir_reginfo); 8481 } 8482 } 8483 8484 if (arm_feature(env, ARM_FEATURE_MPIDR)) { 8485 ARMCPRegInfo mpidr_cp_reginfo[] = { 8486 { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH, 8487 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5, 8488 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW }, 8489 REGINFO_SENTINEL 8490 }; 8491 #ifdef CONFIG_USER_ONLY 8492 ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = { 8493 { .name = "MPIDR_EL1", 8494 .fixed_bits = 0x0000000080000000 }, 8495 REGUSERINFO_SENTINEL 8496 }; 8497 modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo); 8498 #endif 8499 define_arm_cp_regs(cpu, mpidr_cp_reginfo); 8500 } 8501 8502 if (arm_feature(env, ARM_FEATURE_AUXCR)) { 8503 ARMCPRegInfo auxcr_reginfo[] = { 8504 { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH, 8505 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1, 8506 .access = PL1_RW, .accessfn = access_tacr, 8507 .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr }, 8508 { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH, 8509 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1, 8510 .access = PL2_RW, .type = ARM_CP_CONST, 8511 .resetvalue = 0 }, 8512 { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64, 8513 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1, 8514 .access = PL3_RW, .type = ARM_CP_CONST, 8515 .resetvalue = 0 }, 8516 REGINFO_SENTINEL 8517 }; 8518 define_arm_cp_regs(cpu, auxcr_reginfo); 8519 if (cpu_isar_feature(aa32_ac2, cpu)) { 8520 define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo); 8521 } 8522 } 8523 8524 if (arm_feature(env, ARM_FEATURE_CBAR)) { 8525 /* 8526 * CBAR is IMPDEF, but common on Arm Cortex-A implementations. 8527 * There are two flavours: 8528 * (1) older 32-bit only cores have a simple 32-bit CBAR 8529 * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a 8530 * 32-bit register visible to AArch32 at a different encoding 8531 * to the "flavour 1" register and with the bits rearranged to 8532 * be able to squash a 64-bit address into the 32-bit view. 8533 * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but 8534 * in future if we support AArch32-only configs of some of the 8535 * AArch64 cores we might need to add a specific feature flag 8536 * to indicate cores with "flavour 2" CBAR. 8537 */ 8538 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 8539 /* 32 bit view is [31:18] 0...0 [43:32]. 
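 * That is, CBAR[31:18] holds reset_cbar[31:18] and CBAR[11:0] holds reset_cbar[43:32], with the bits in between zero; the extract64() expression below constructs exactly this layout.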
*/ 8540 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18) 8541 | extract64(cpu->reset_cbar, 32, 12); 8542 ARMCPRegInfo cbar_reginfo[] = { 8543 { .name = "CBAR", 8544 .type = ARM_CP_CONST, 8545 .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0, 8546 .access = PL1_R, .resetvalue = cbar32 }, 8547 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64, 8548 .type = ARM_CP_CONST, 8549 .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0, 8550 .access = PL1_R, .resetvalue = cpu->reset_cbar }, 8551 REGINFO_SENTINEL 8552 }; 8553 /* We don't implement a r/w 64 bit CBAR currently */ 8554 assert(arm_feature(env, ARM_FEATURE_CBAR_RO)); 8555 define_arm_cp_regs(cpu, cbar_reginfo); 8556 } else { 8557 ARMCPRegInfo cbar = { 8558 .name = "CBAR", 8559 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, 8560 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar, 8561 .fieldoffset = offsetof(CPUARMState, 8562 cp15.c15_config_base_address) 8563 }; 8564 if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { 8565 cbar.access = PL1_R; 8566 cbar.fieldoffset = 0; 8567 cbar.type = ARM_CP_CONST; 8568 } 8569 define_one_arm_cp_reg(cpu, &cbar); 8570 } 8571 } 8572 8573 if (arm_feature(env, ARM_FEATURE_VBAR)) { 8574 ARMCPRegInfo vbar_cp_reginfo[] = { 8575 { .name = "VBAR", .state = ARM_CP_STATE_BOTH, 8576 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0, 8577 .access = PL1_RW, .writefn = vbar_write, 8578 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s), 8579 offsetof(CPUARMState, cp15.vbar_ns) }, 8580 .resetvalue = 0 }, 8581 REGINFO_SENTINEL 8582 }; 8583 define_arm_cp_regs(cpu, vbar_cp_reginfo); 8584 } 8585 8586 /* Generic registers whose values depend on the implementation */ 8587 { 8588 ARMCPRegInfo sctlr = { 8589 .name = "SCTLR", .state = ARM_CP_STATE_BOTH, 8590 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 8591 .access = PL1_RW, .accessfn = access_tvm_trvm, 8592 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s), 8593 offsetof(CPUARMState, cp15.sctlr_ns) }, 8594 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr, 8595 .raw_writefn = raw_write, 8596 }; 8597 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 8598 /* Normally we would always end the TB on an SCTLR write, but Linux 8599 * arch/arm/mach-pxa/sleep.S expects two instructions following 8600 * an MMU enable to execute from cache. Imitate this behaviour. 
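 * ARM_CP_SUPPRESS_TB_END (ORed into the type below) is what prevents the translation block from being ended on the write.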
8601 */ 8602 sctlr.type |= ARM_CP_SUPPRESS_TB_END; 8603 } 8604 define_one_arm_cp_reg(cpu, &sctlr); 8605 } 8606 8607 if (cpu_isar_feature(aa64_lor, cpu)) { 8608 define_arm_cp_regs(cpu, lor_reginfo); 8609 } 8610 if (cpu_isar_feature(aa64_pan, cpu)) { 8611 define_one_arm_cp_reg(cpu, &pan_reginfo); 8612 } 8613 #ifndef CONFIG_USER_ONLY 8614 if (cpu_isar_feature(aa64_ats1e1, cpu)) { 8615 define_arm_cp_regs(cpu, ats1e1_reginfo); 8616 } 8617 if (cpu_isar_feature(aa32_ats1e1, cpu)) { 8618 define_arm_cp_regs(cpu, ats1cp_reginfo); 8619 } 8620 #endif 8621 if (cpu_isar_feature(aa64_uao, cpu)) { 8622 define_one_arm_cp_reg(cpu, &uao_reginfo); 8623 } 8624 8625 if (cpu_isar_feature(aa64_dit, cpu)) { 8626 define_one_arm_cp_reg(cpu, &dit_reginfo); 8627 } 8628 if (cpu_isar_feature(aa64_ssbs, cpu)) { 8629 define_one_arm_cp_reg(cpu, &ssbs_reginfo); 8630 } 8631 8632 if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) { 8633 define_arm_cp_regs(cpu, vhe_reginfo); 8634 } 8635 8636 if (cpu_isar_feature(aa64_sve, cpu)) { 8637 define_one_arm_cp_reg(cpu, &zcr_el1_reginfo); 8638 if (arm_feature(env, ARM_FEATURE_EL2)) { 8639 define_one_arm_cp_reg(cpu, &zcr_el2_reginfo); 8640 } else { 8641 define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo); 8642 } 8643 if (arm_feature(env, ARM_FEATURE_EL3)) { 8644 define_one_arm_cp_reg(cpu, &zcr_el3_reginfo); 8645 } 8646 } 8647 8648 #ifdef TARGET_AARCH64 8649 if (cpu_isar_feature(aa64_pauth, cpu)) { 8650 define_arm_cp_regs(cpu, pauth_reginfo); 8651 } 8652 if (cpu_isar_feature(aa64_rndr, cpu)) { 8653 define_arm_cp_regs(cpu, rndr_reginfo); 8654 } 8655 if (cpu_isar_feature(aa64_tlbirange, cpu)) { 8656 define_arm_cp_regs(cpu, tlbirange_reginfo); 8657 } 8658 if (cpu_isar_feature(aa64_tlbios, cpu)) { 8659 define_arm_cp_regs(cpu, tlbios_reginfo); 8660 } 8661 #ifndef CONFIG_USER_ONLY 8662 /* Data Cache clean instructions up to PoP */ 8663 if (cpu_isar_feature(aa64_dcpop, cpu)) { 8664 define_one_arm_cp_reg(cpu, dcpop_reg); 8665 8666 if (cpu_isar_feature(aa64_dcpodp, cpu)) { 8667 define_one_arm_cp_reg(cpu, dcpodp_reg); 8668 } 8669 } 8670 #endif /*CONFIG_USER_ONLY*/ 8671 8672 /* 8673 * If full MTE is enabled, add all of the system registers. 8674 * If only "instructions available at EL0" are enabled, 8675 * then define only a RAZ/WI version of PSTATE.TCO. 8676 */ 8677 if (cpu_isar_feature(aa64_mte, cpu)) { 8678 define_arm_cp_regs(cpu, mte_reginfo); 8679 define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo); 8680 } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) { 8681 define_arm_cp_regs(cpu, mte_tco_ro_reginfo); 8682 define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo); 8683 } 8684 #endif 8685 8686 if (cpu_isar_feature(any_predinv, cpu)) { 8687 define_arm_cp_regs(cpu, predinv_reginfo); 8688 } 8689 8690 if (cpu_isar_feature(any_ccidx, cpu)) { 8691 define_arm_cp_regs(cpu, ccsidr2_reginfo); 8692 } 8693 8694 #ifndef CONFIG_USER_ONLY 8695 /* 8696 * Register redirections and aliases must be done last, 8697 * after the registers from the other extensions have been defined. 8698 */ 8699 if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) { 8700 define_arm_vh_e2h_redirects_aliases(cpu); 8701 } 8702 #endif 8703 } 8704 8705 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) 8706 { 8707 CPUState *cs = CPU(cpu); 8708 CPUARMState *env = &cpu->env; 8709 8710 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 8711 /* 8712 * The lower part of each SVE register aliases to the FPU 8713 * registers so we don't need to include both. 
8714 */ 8715 #ifdef TARGET_AARCH64 8716 if (isar_feature_aa64_sve(&cpu->isar)) { 8717 gdb_register_coprocessor(cs, arm_gdb_get_svereg, arm_gdb_set_svereg, 8718 arm_gen_dynamic_svereg_xml(cs, cs->gdb_num_regs), 8719 "sve-registers.xml", 0); 8720 } else 8721 #endif 8722 { 8723 gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg, 8724 aarch64_fpu_gdb_set_reg, 8725 34, "aarch64-fpu.xml", 0); 8726 } 8727 } else if (arm_feature(env, ARM_FEATURE_NEON)) { 8728 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 8729 51, "arm-neon.xml", 0); 8730 } else if (cpu_isar_feature(aa32_simd_r32, cpu)) { 8731 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 8732 35, "arm-vfp3.xml", 0); 8733 } else if (cpu_isar_feature(aa32_vfp_simd, cpu)) { 8734 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 8735 19, "arm-vfp.xml", 0); 8736 } 8737 gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg, 8738 arm_gen_dynamic_sysreg_xml(cs, cs->gdb_num_regs), 8739 "system-registers.xml", 0); 8740 8741 } 8742 8743 /* Sort alphabetically by type name, except for "any". */ 8744 static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b) 8745 { 8746 ObjectClass *class_a = (ObjectClass *)a; 8747 ObjectClass *class_b = (ObjectClass *)b; 8748 const char *name_a, *name_b; 8749 8750 name_a = object_class_get_name(class_a); 8751 name_b = object_class_get_name(class_b); 8752 if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) { 8753 return 1; 8754 } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) { 8755 return -1; 8756 } else { 8757 return strcmp(name_a, name_b); 8758 } 8759 } 8760 8761 static void arm_cpu_list_entry(gpointer data, gpointer user_data) 8762 { 8763 ObjectClass *oc = data; 8764 const char *typename; 8765 char *name; 8766 8767 typename = object_class_get_name(oc); 8768 name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU)); 8769 qemu_printf(" %s\n", name); 8770 g_free(name); 8771 } 8772 8773 void arm_cpu_list(void) 8774 { 8775 GSList *list; 8776 8777 list = object_class_get_list(TYPE_ARM_CPU, false); 8778 list = g_slist_sort(list, arm_cpu_list_compare); 8779 qemu_printf("Available CPUs:\n"); 8780 g_slist_foreach(list, arm_cpu_list_entry, NULL); 8781 g_slist_free(list); 8782 } 8783 8784 static void arm_cpu_add_definition(gpointer data, gpointer user_data) 8785 { 8786 ObjectClass *oc = data; 8787 CpuDefinitionInfoList **cpu_list = user_data; 8788 CpuDefinitionInfo *info; 8789 const char *typename; 8790 8791 typename = object_class_get_name(oc); 8792 info = g_malloc0(sizeof(*info)); 8793 info->name = g_strndup(typename, 8794 strlen(typename) - strlen("-" TYPE_ARM_CPU)); 8795 info->q_typename = g_strdup(typename); 8796 8797 QAPI_LIST_PREPEND(*cpu_list, info); 8798 } 8799 8800 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) 8801 { 8802 CpuDefinitionInfoList *cpu_list = NULL; 8803 GSList *list; 8804 8805 list = object_class_get_list(TYPE_ARM_CPU, false); 8806 g_slist_foreach(list, arm_cpu_add_definition, &cpu_list); 8807 g_slist_free(list); 8808 8809 return cpu_list; 8810 } 8811 8812 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, 8813 void *opaque, int state, int secstate, 8814 int crm, int opc1, int opc2, 8815 const char *name) 8816 { 8817 /* Private utility function for define_one_arm_cp_reg_with_opaque(): 8818 * add a single reginfo struct to the hash table. 8819 */ 8820 uint32_t *key = g_new(uint32_t, 1); 8821 ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo)); 8822 int is64 = (r->type & ARM_CP_64BIT) ? 
1 : 0; 8823 int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0; 8824 8825 r2->name = g_strdup(name); 8826 /* Reset the secure state to the specific incoming state. This is 8827 * necessary as the register may have been defined with both states. 8828 */ 8829 r2->secure = secstate; 8830 8831 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { 8832 /* Register is banked (using both entries in array). 8833 * Overwriting fieldoffset as the array is only used to define 8834 * banked registers but later only fieldoffset is used. 8835 */ 8836 r2->fieldoffset = r->bank_fieldoffsets[ns]; 8837 } 8838 8839 if (state == ARM_CP_STATE_AA32) { 8840 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { 8841 /* If the register is banked then we don't need to migrate or 8842 * reset the 32-bit instance in certain cases: 8843 * 8844 * 1) If the register has both 32-bit and 64-bit instances then we 8845 * can count on the 64-bit instance taking care of the 8846 * non-secure bank. 8847 * 2) If ARMv8 is enabled then we can count on a 64-bit version 8848 * taking care of the secure bank. This requires that separate 8849 * 32 and 64-bit definitions are provided. 8850 */ 8851 if ((r->state == ARM_CP_STATE_BOTH && ns) || 8852 (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) { 8853 r2->type |= ARM_CP_ALIAS; 8854 } 8855 } else if ((secstate != r->secure) && !ns) { 8856 /* The register is not banked so we only want to allow migration of 8857 * the non-secure instance. 8858 */ 8859 r2->type |= ARM_CP_ALIAS; 8860 } 8861 8862 if (r->state == ARM_CP_STATE_BOTH) { 8863 /* We assume it is a cp15 register if the .cp field is left unset. 8864 */ 8865 if (r2->cp == 0) { 8866 r2->cp = 15; 8867 } 8868 8869 #ifdef HOST_WORDS_BIGENDIAN 8870 if (r2->fieldoffset) { 8871 r2->fieldoffset += sizeof(uint32_t); 8872 } 8873 #endif 8874 } 8875 } 8876 if (state == ARM_CP_STATE_AA64) { 8877 /* To allow abbreviation of ARMCPRegInfo 8878 * definitions, we treat cp == 0 as equivalent to 8879 * the value for "standard guest-visible sysreg". 8880 * STATE_BOTH definitions are also always "standard 8881 * sysreg" in their AArch64 view (the .cp value may 8882 * be non-zero for the benefit of the AArch32 view). 8883 */ 8884 if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) { 8885 r2->cp = CP_REG_ARM64_SYSREG_CP; 8886 } 8887 *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm, 8888 r2->opc0, opc1, opc2); 8889 } else { 8890 *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2); 8891 } 8892 if (opaque) { 8893 r2->opaque = opaque; 8894 } 8895 /* reginfo passed to helpers is correct for the actual access, 8896 * and is never ARM_CP_STATE_BOTH: 8897 */ 8898 r2->state = state; 8899 /* Make sure reginfo passed to helpers for wildcarded regs 8900 * has the correct crm/opc1/opc2 for this reg, not CP_ANY: 8901 */ 8902 r2->crm = crm; 8903 r2->opc1 = opc1; 8904 r2->opc2 = opc2; 8905 /* By convention, for wildcarded registers only the first 8906 * entry is used for migration; the others are marked as 8907 * ALIAS so we don't try to transfer the register 8908 * multiple times. Special registers (ie NOP/WFI) are 8909 * never migratable and not even raw-accessible. 8910 */ 8911 if ((r->type & ARM_CP_SPECIAL)) { 8912 r2->type |= ARM_CP_NO_RAW; 8913 } 8914 if (((r->crm == CP_ANY) && crm != 0) || 8915 ((r->opc1 == CP_ANY) && opc1 != 0) || 8916 ((r->opc2 == CP_ANY) && opc2 != 0)) { 8917 r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB; 8918 } 8919 8920 /* Check that raw accesses are either forbidden or handled. 
Note that 8921 * we can't assert this earlier because the setup of fieldoffset for 8922 * banked registers has to be done first. 8923 */ 8924 if (!(r2->type & ARM_CP_NO_RAW)) { 8925 assert(!raw_accessors_invalid(r2)); 8926 } 8927 8928 /* Overriding of an existing definition must be explicitly 8929 * requested. 8930 */ 8931 if (!(r->type & ARM_CP_OVERRIDE)) { 8932 ARMCPRegInfo *oldreg; 8933 oldreg = g_hash_table_lookup(cpu->cp_regs, key); 8934 if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) { 8935 fprintf(stderr, "Register redefined: cp=%d %d bit " 8936 "crn=%d crm=%d opc1=%d opc2=%d, " 8937 "was %s, now %s\n", r2->cp, 32 + 32 * is64, 8938 r2->crn, r2->crm, r2->opc1, r2->opc2, 8939 oldreg->name, r2->name); 8940 g_assert_not_reached(); 8941 } 8942 } 8943 g_hash_table_insert(cpu->cp_regs, key, r2); 8944 } 8945 8946 8947 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, 8948 const ARMCPRegInfo *r, void *opaque) 8949 { 8950 /* Define implementations of coprocessor registers. 8951 * We store these in a hashtable because typically 8952 * there are less than 150 registers in a space which 8953 * is 16*16*16*8*8 = 262144 in size. 8954 * Wildcarding is supported for the crm, opc1 and opc2 fields. 8955 * If a register is defined twice then the second definition is 8956 * used, so this can be used to define some generic registers and 8957 * then override them with implementation specific variations. 8958 * At least one of the original and the second definition should 8959 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard 8960 * against accidental use. 8961 * 8962 * The state field defines whether the register is to be 8963 * visible in the AArch32 or AArch64 execution state. If the 8964 * state is set to ARM_CP_STATE_BOTH then we synthesise a 8965 * reginfo structure for the AArch32 view, which sees the lower 8966 * 32 bits of the 64 bit register. 8967 * 8968 * Only registers visible in AArch64 may set r->opc0; opc0 cannot 8969 * be wildcarded. AArch64 registers are always considered to be 64 8970 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of 8971 * the register, if any. 8972 */ 8973 int crm, opc1, opc2, state; 8974 int crmmin = (r->crm == CP_ANY) ? 0 : r->crm; 8975 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm; 8976 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1; 8977 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1; 8978 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2; 8979 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2; 8980 /* 64 bit registers have only CRm and Opc1 fields */ 8981 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn))); 8982 /* op0 only exists in the AArch64 encodings */ 8983 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0)); 8984 /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */ 8985 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT)); 8986 /* 8987 * This API is only for Arm's system coprocessors (14 and 15) or 8988 * (M-profile or v7A-and-earlier only) for implementation defined 8989 * coprocessors in the range 0..7. Our decode assumes this, since 8990 * 8..13 can be used for other insns including VFP and Neon. See 8991 * valid_cp() in translate.c. Assert here that we haven't tried 8992 * to use an invalid coprocessor number. 8993 */ 8994 switch (r->state) { 8995 case ARM_CP_STATE_BOTH: 8996 /* 0 has a special meaning, but otherwise the same rules as AA32. 
*/ 8997 if (r->cp == 0) { 8998 break; 8999 } 9000 /* fall through */ 9001 case ARM_CP_STATE_AA32: 9002 if (arm_feature(&cpu->env, ARM_FEATURE_V8) && 9003 !arm_feature(&cpu->env, ARM_FEATURE_M)) { 9004 assert(r->cp >= 14 && r->cp <= 15); 9005 } else { 9006 assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15)); 9007 } 9008 break; 9009 case ARM_CP_STATE_AA64: 9010 assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP); 9011 break; 9012 default: 9013 g_assert_not_reached(); 9014 } 9015 /* The AArch64 pseudocode CheckSystemAccess() specifies that op1 9016 * encodes a minimum access level for the register. We roll this 9017 * runtime check into our general permission check code, so check 9018 * here that the reginfo's specified permissions are strict enough 9019 * to encompass the generic architectural permission check. 9020 */ 9021 if (r->state != ARM_CP_STATE_AA32) { 9022 int mask = 0; 9023 switch (r->opc1) { 9024 case 0: 9025 /* min_EL EL1, but some accessible to EL0 via kernel ABI */ 9026 mask = PL0U_R | PL1_RW; 9027 break; 9028 case 1: case 2: 9029 /* min_EL EL1 */ 9030 mask = PL1_RW; 9031 break; 9032 case 3: 9033 /* min_EL EL0 */ 9034 mask = PL0_RW; 9035 break; 9036 case 4: 9037 case 5: 9038 /* min_EL EL2 */ 9039 mask = PL2_RW; 9040 break; 9041 case 6: 9042 /* min_EL EL3 */ 9043 mask = PL3_RW; 9044 break; 9045 case 7: 9046 /* min_EL EL1, secure mode only (we don't check the latter) */ 9047 mask = PL1_RW; 9048 break; 9049 default: 9050 /* broken reginfo with out-of-range opc1 */ 9051 assert(false); 9052 break; 9053 } 9054 /* assert our permissions are not too lax (stricter is fine) */ 9055 assert((r->access & ~mask) == 0); 9056 } 9057 9058 /* Check that the register definition has enough info to handle 9059 * reads and writes if they are permitted. 9060 */ 9061 if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) { 9062 if (r->access & PL3_R) { 9063 assert((r->fieldoffset || 9064 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 9065 r->readfn); 9066 } 9067 if (r->access & PL3_W) { 9068 assert((r->fieldoffset || 9069 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 9070 r->writefn); 9071 } 9072 } 9073 /* Bad type field probably means missing sentinel at end of reg list */ 9074 assert(cptype_valid(r->type)); 9075 for (crm = crmmin; crm <= crmmax; crm++) { 9076 for (opc1 = opc1min; opc1 <= opc1max; opc1++) { 9077 for (opc2 = opc2min; opc2 <= opc2max; opc2++) { 9078 for (state = ARM_CP_STATE_AA32; 9079 state <= ARM_CP_STATE_AA64; state++) { 9080 if (r->state != state && r->state != ARM_CP_STATE_BOTH) { 9081 continue; 9082 } 9083 if (state == ARM_CP_STATE_AA32) { 9084 /* Under AArch32 CP registers can be common 9085 * (same for secure and non-secure world) or banked. 
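 * If the reginfo specifies a single security state we add one entry for that state; otherwise we add two entries, one per security state, with the secure copy given a "_S" name suffix.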
9086 */ 9087 char *name; 9088 9089 switch (r->secure) { 9090 case ARM_CP_SECSTATE_S: 9091 case ARM_CP_SECSTATE_NS: 9092 add_cpreg_to_hashtable(cpu, r, opaque, state, 9093 r->secure, crm, opc1, opc2, 9094 r->name); 9095 break; 9096 default: 9097 name = g_strdup_printf("%s_S", r->name); 9098 add_cpreg_to_hashtable(cpu, r, opaque, state, 9099 ARM_CP_SECSTATE_S, 9100 crm, opc1, opc2, name); 9101 g_free(name); 9102 add_cpreg_to_hashtable(cpu, r, opaque, state, 9103 ARM_CP_SECSTATE_NS, 9104 crm, opc1, opc2, r->name); 9105 break; 9106 } 9107 } else { 9108 /* AArch64 registers get mapped to non-secure instance 9109 * of AArch32 */ 9110 add_cpreg_to_hashtable(cpu, r, opaque, state, 9111 ARM_CP_SECSTATE_NS, 9112 crm, opc1, opc2, r->name); 9113 } 9114 } 9115 } 9116 } 9117 } 9118 } 9119 9120 void define_arm_cp_regs_with_opaque(ARMCPU *cpu, 9121 const ARMCPRegInfo *regs, void *opaque) 9122 { 9123 /* Define a whole list of registers */ 9124 const ARMCPRegInfo *r; 9125 for (r = regs; r->type != ARM_CP_SENTINEL; r++) { 9126 define_one_arm_cp_reg_with_opaque(cpu, r, opaque); 9127 } 9128 } 9129 9130 /* 9131 * Modify ARMCPRegInfo for access from userspace. 9132 * 9133 * This is a data driven modification directed by 9134 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as 9135 * user-space cannot alter any values and dynamic values pertaining to 9136 * execution state are hidden from user space view anyway. 9137 */ 9138 void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods) 9139 { 9140 const ARMCPRegUserSpaceInfo *m; 9141 ARMCPRegInfo *r; 9142 9143 for (m = mods; m->name; m++) { 9144 GPatternSpec *pat = NULL; 9145 if (m->is_glob) { 9146 pat = g_pattern_spec_new(m->name); 9147 } 9148 for (r = regs; r->type != ARM_CP_SENTINEL; r++) { 9149 if (pat && g_pattern_match_string(pat, r->name)) { 9150 r->type = ARM_CP_CONST; 9151 r->access = PL0U_R; 9152 r->resetvalue = 0; 9153 /* continue */ 9154 } else if (strcmp(r->name, m->name) == 0) { 9155 r->type = ARM_CP_CONST; 9156 r->access = PL0U_R; 9157 r->resetvalue &= m->exported_bits; 9158 r->resetvalue |= m->fixed_bits; 9159 break; 9160 } 9161 } 9162 if (pat) { 9163 g_pattern_spec_free(pat); 9164 } 9165 } 9166 } 9167 9168 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp) 9169 { 9170 return g_hash_table_lookup(cpregs, &encoded_cp); 9171 } 9172 9173 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri, 9174 uint64_t value) 9175 { 9176 /* Helper coprocessor write function for write-ignore registers */ 9177 } 9178 9179 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri) 9180 { 9181 /* Helper coprocessor write function for read-as-zero registers */ 9182 return 0; 9183 } 9184 9185 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque) 9186 { 9187 /* Helper coprocessor reset function for do-nothing-on-reset registers */ 9188 } 9189 9190 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type) 9191 { 9192 /* Return true if it is not valid for us to switch to 9193 * this CPU mode (ie all the UNPREDICTABLE cases in 9194 * the ARM ARM CPSRWriteByInstr pseudocode). 9195 */ 9196 9197 /* Changes to or from Hyp via MSR and CPS are illegal. 
*/ 9198 if (write_type == CPSRWriteByInstr && 9199 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP || 9200 mode == ARM_CPU_MODE_HYP)) { 9201 return 1; 9202 } 9203 9204 switch (mode) { 9205 case ARM_CPU_MODE_USR: 9206 return 0; 9207 case ARM_CPU_MODE_SYS: 9208 case ARM_CPU_MODE_SVC: 9209 case ARM_CPU_MODE_ABT: 9210 case ARM_CPU_MODE_UND: 9211 case ARM_CPU_MODE_IRQ: 9212 case ARM_CPU_MODE_FIQ: 9213 /* Note that we don't implement the IMPDEF NSACR.RFR which in v7 9214 * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.) 9215 */ 9216 /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR 9217 * and CPS are treated as illegal mode changes. 9218 */ 9219 if (write_type == CPSRWriteByInstr && 9220 (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON && 9221 (arm_hcr_el2_eff(env) & HCR_TGE)) { 9222 return 1; 9223 } 9224 return 0; 9225 case ARM_CPU_MODE_HYP: 9226 return !arm_is_el2_enabled(env) || arm_current_el(env) < 2; 9227 case ARM_CPU_MODE_MON: 9228 return arm_current_el(env) < 3; 9229 default: 9230 return 1; 9231 } 9232 } 9233 9234 uint32_t cpsr_read(CPUARMState *env) 9235 { 9236 int ZF; 9237 ZF = (env->ZF == 0); 9238 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) | 9239 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) 9240 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25) 9241 | ((env->condexec_bits & 0xfc) << 8) 9242 | (env->GE << 16) | (env->daif & CPSR_AIF); 9243 } 9244 9245 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, 9246 CPSRWriteType write_type) 9247 { 9248 uint32_t changed_daif; 9249 bool rebuild_hflags = (write_type != CPSRWriteRaw) && 9250 (mask & (CPSR_M | CPSR_E | CPSR_IL)); 9251 9252 if (mask & CPSR_NZCV) { 9253 env->ZF = (~val) & CPSR_Z; 9254 env->NF = val; 9255 env->CF = (val >> 29) & 1; 9256 env->VF = (val << 3) & 0x80000000; 9257 } 9258 if (mask & CPSR_Q) 9259 env->QF = ((val & CPSR_Q) != 0); 9260 if (mask & CPSR_T) 9261 env->thumb = ((val & CPSR_T) != 0); 9262 if (mask & CPSR_IT_0_1) { 9263 env->condexec_bits &= ~3; 9264 env->condexec_bits |= (val >> 25) & 3; 9265 } 9266 if (mask & CPSR_IT_2_7) { 9267 env->condexec_bits &= 3; 9268 env->condexec_bits |= (val >> 8) & 0xfc; 9269 } 9270 if (mask & CPSR_GE) { 9271 env->GE = (val >> 16) & 0xf; 9272 } 9273 9274 /* In a V7 implementation that includes the security extensions but does 9275 * not include Virtualization Extensions the SCR.FW and SCR.AW bits control 9276 * whether non-secure software is allowed to change the CPSR_F and CPSR_A 9277 * bits respectively. 9278 * 9279 * In a V8 implementation, it is permitted for privileged software to 9280 * change the CPSR A/F bits regardless of the SCR.AW/FW bits. 9281 */ 9282 if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) && 9283 arm_feature(env, ARM_FEATURE_EL3) && 9284 !arm_feature(env, ARM_FEATURE_EL2) && 9285 !arm_is_secure(env)) { 9286 9287 changed_daif = (env->daif ^ val) & mask; 9288 9289 if (changed_daif & CPSR_A) { 9290 /* Check to see if we are allowed to change the masking of async 9291 * abort exceptions from a non-secure state. 9292 */ 9293 if (!(env->cp15.scr_el3 & SCR_AW)) { 9294 qemu_log_mask(LOG_GUEST_ERROR, 9295 "Ignoring attempt to switch CPSR_A flag from " 9296 "non-secure world with SCR.AW bit clear\n"); 9297 mask &= ~CPSR_A; 9298 } 9299 } 9300 9301 if (changed_daif & CPSR_F) { 9302 /* Check to see if we are allowed to change the masking of FIQ 9303 * exceptions from a non-secure state. 
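 * As with CPSR_A and SCR.AW above, SCR.FW gates this; in addition the SCTLR.NMFI check below prevents masking FIQs when non-maskable FIQ support is enabled.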
9304 */ 9305 if (!(env->cp15.scr_el3 & SCR_FW)) { 9306 qemu_log_mask(LOG_GUEST_ERROR, 9307 "Ignoring attempt to switch CPSR_F flag from " 9308 "non-secure world with SCR.FW bit clear\n"); 9309 mask &= ~CPSR_F; 9310 } 9311 9312 /* Check whether non-maskable FIQ (NMFI) support is enabled. 9313 * If this bit is set software is not allowed to mask 9314 * FIQs, but is allowed to set CPSR_F to 0. 9315 */ 9316 if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) && 9317 (val & CPSR_F)) { 9318 qemu_log_mask(LOG_GUEST_ERROR, 9319 "Ignoring attempt to enable CPSR_F flag " 9320 "(non-maskable FIQ [NMFI] support enabled)\n"); 9321 mask &= ~CPSR_F; 9322 } 9323 } 9324 } 9325 9326 env->daif &= ~(CPSR_AIF & mask); 9327 env->daif |= val & CPSR_AIF & mask; 9328 9329 if (write_type != CPSRWriteRaw && 9330 ((env->uncached_cpsr ^ val) & mask & CPSR_M)) { 9331 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) { 9332 /* Note that we can only get here in USR mode if this is a 9333 * gdb stub write; for this case we follow the architectural 9334 * behaviour for guest writes in USR mode of ignoring an attempt 9335 * to switch mode. (Those are caught by translate.c for writes 9336 * triggered by guest instructions.) 9337 */ 9338 mask &= ~CPSR_M; 9339 } else if (bad_mode_switch(env, val & CPSR_M, write_type)) { 9340 /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in 9341 * v7, and has defined behaviour in v8: 9342 * + leave CPSR.M untouched 9343 * + allow changes to the other CPSR fields 9344 * + set PSTATE.IL 9345 * For user changes via the GDB stub, we don't set PSTATE.IL, 9346 * as this would be unnecessarily harsh for a user error. 9347 */ 9348 mask &= ~CPSR_M; 9349 if (write_type != CPSRWriteByGDBStub && 9350 arm_feature(env, ARM_FEATURE_V8)) { 9351 mask |= CPSR_IL; 9352 val |= CPSR_IL; 9353 } 9354 qemu_log_mask(LOG_GUEST_ERROR, 9355 "Illegal AArch32 mode switch attempt from %s to %s\n", 9356 aarch32_mode_name(env->uncached_cpsr), 9357 aarch32_mode_name(val)); 9358 } else { 9359 qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n", 9360 write_type == CPSRWriteExceptionReturn ? 
9361 "Exception return from AArch32" : 9362 "AArch32 mode switch from", 9363 aarch32_mode_name(env->uncached_cpsr), 9364 aarch32_mode_name(val), env->regs[15]); 9365 switch_mode(env, val & CPSR_M); 9366 } 9367 } 9368 mask &= ~CACHED_CPSR_BITS; 9369 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); 9370 if (rebuild_hflags) { 9371 arm_rebuild_hflags(env); 9372 } 9373 } 9374 9375 /* Sign/zero extend */ 9376 uint32_t HELPER(sxtb16)(uint32_t x) 9377 { 9378 uint32_t res; 9379 res = (uint16_t)(int8_t)x; 9380 res |= (uint32_t)(int8_t)(x >> 16) << 16; 9381 return res; 9382 } 9383 9384 static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra) 9385 { 9386 /* 9387 * Take a division-by-zero exception if necessary; otherwise return 9388 * to get the usual non-trapping division behaviour (result of 0) 9389 */ 9390 if (arm_feature(env, ARM_FEATURE_M) 9391 && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) { 9392 raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra); 9393 } 9394 } 9395 9396 uint32_t HELPER(uxtb16)(uint32_t x) 9397 { 9398 uint32_t res; 9399 res = (uint16_t)(uint8_t)x; 9400 res |= (uint32_t)(uint8_t)(x >> 16) << 16; 9401 return res; 9402 } 9403 9404 int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den) 9405 { 9406 if (den == 0) { 9407 handle_possible_div0_trap(env, GETPC()); 9408 return 0; 9409 } 9410 if (num == INT_MIN && den == -1) { 9411 return INT_MIN; 9412 } 9413 return num / den; 9414 } 9415 9416 uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den) 9417 { 9418 if (den == 0) { 9419 handle_possible_div0_trap(env, GETPC()); 9420 return 0; 9421 } 9422 return num / den; 9423 } 9424 9425 uint32_t HELPER(rbit)(uint32_t x) 9426 { 9427 return revbit32(x); 9428 } 9429 9430 #ifdef CONFIG_USER_ONLY 9431 9432 static void switch_mode(CPUARMState *env, int mode) 9433 { 9434 ARMCPU *cpu = env_archcpu(env); 9435 9436 if (mode != ARM_CPU_MODE_USR) { 9437 cpu_abort(CPU(cpu), "Tried to switch out of user mode\n"); 9438 } 9439 } 9440 9441 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, 9442 uint32_t cur_el, bool secure) 9443 { 9444 return 1; 9445 } 9446 9447 void aarch64_sync_64_to_32(CPUARMState *env) 9448 { 9449 g_assert_not_reached(); 9450 } 9451 9452 #else 9453 9454 static void switch_mode(CPUARMState *env, int mode) 9455 { 9456 int old_mode; 9457 int i; 9458 9459 old_mode = env->uncached_cpsr & CPSR_M; 9460 if (mode == old_mode) 9461 return; 9462 9463 if (old_mode == ARM_CPU_MODE_FIQ) { 9464 memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t)); 9465 memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t)); 9466 } else if (mode == ARM_CPU_MODE_FIQ) { 9467 memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t)); 9468 memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t)); 9469 } 9470 9471 i = bank_number(old_mode); 9472 env->banked_r13[i] = env->regs[13]; 9473 env->banked_spsr[i] = env->spsr; 9474 9475 i = bank_number(mode); 9476 env->regs[13] = env->banked_r13[i]; 9477 env->spsr = env->banked_spsr[i]; 9478 9479 env->banked_r14[r14_bank_number(old_mode)] = env->regs[14]; 9480 env->regs[14] = env->banked_r14[r14_bank_number(mode)]; 9481 } 9482 9483 /* Physical Interrupt Target EL Lookup Table 9484 * 9485 * [ From ARM ARM section G1.13.4 (Table G1-15) ] 9486 * 9487 * The below multi-dimensional table is used for looking up the target 9488 * exception level given numerous condition criteria. 
Specifically, the 9489 * target EL is based on SCR and HCR routing controls as well as the 9490 * currently executing EL and secure state. 9491 * 9492 * Dimensions: 9493 * target_el_table[2][2][2][2][2][4] 9494 * | | | | | +--- Current EL 9495 * | | | | +------ Non-secure(0)/Secure(1) 9496 * | | | +--------- HCR mask override 9497 * | | +------------ SCR exec state control 9498 * | +--------------- SCR mask override 9499 * +------------------ 32-bit(0)/64-bit(1) EL3 9500 * 9501 * The table values are as such: 9502 * 0-3 = EL0-EL3 9503 * -1 = Cannot occur 9504 * 9505 * The ARM ARM target EL table includes entries indicating that an "exception 9506 * is not taken". The two cases where this is applicable are: 9507 * 1) An exception is taken from EL3 but the SCR does not have the exception 9508 * routed to EL3. 9509 * 2) An exception is taken from EL2 but the HCR does not have the exception 9510 * routed to EL2. 9511 * In these two cases, the below table contain a target of EL1. This value is 9512 * returned as it is expected that the consumer of the table data will check 9513 * for "target EL >= current EL" to ensure the exception is not taken. 9514 * 9515 * SCR HCR 9516 * 64 EA AMO From 9517 * BIT IRQ IMO Non-secure Secure 9518 * EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3 9519 */ 9520 static const int8_t target_el_table[2][2][2][2][2][4] = { 9521 {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, 9522 {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},}, 9523 {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, 9524 {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},}, 9525 {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, 9526 {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},}, 9527 {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, 9528 {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},}, 9529 {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },}, 9530 {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 2, 2, -1, 1 },},}, 9531 {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, 1, 1 },}, 9532 {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 2, 2, 2, 1 },},},}, 9533 {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, 9534 {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},}, 9535 {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },}, 9536 {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },},},},}, 9537 }; 9538 9539 /* 9540 * Determine the target EL for physical exceptions 9541 */ 9542 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, 9543 uint32_t cur_el, bool secure) 9544 { 9545 CPUARMState *env = cs->env_ptr; 9546 bool rw; 9547 bool scr; 9548 bool hcr; 9549 int target_el; 9550 /* Is the highest EL AArch64? */ 9551 bool is64 = arm_feature(env, ARM_FEATURE_AARCH64); 9552 uint64_t hcr_el2; 9553 9554 if (arm_feature(env, ARM_FEATURE_EL3)) { 9555 rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW); 9556 } else { 9557 /* Either EL2 is the highest EL (and so the EL2 register width 9558 * is given by is64); or there is no EL2 or EL3, in which case 9559 * the value of 'rw' does not affect the table lookup anyway. 
9560 */ 9561 rw = is64; 9562 } 9563 9564 hcr_el2 = arm_hcr_el2_eff(env); 9565 switch (excp_idx) { 9566 case EXCP_IRQ: 9567 scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ); 9568 hcr = hcr_el2 & HCR_IMO; 9569 break; 9570 case EXCP_FIQ: 9571 scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ); 9572 hcr = hcr_el2 & HCR_FMO; 9573 break; 9574 default: 9575 scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA); 9576 hcr = hcr_el2 & HCR_AMO; 9577 break; 9578 }; 9579 9580 /* 9581 * For these purposes, TGE and AMO/IMO/FMO both force the 9582 * interrupt to EL2. Fold TGE into the bit extracted above. 9583 */ 9584 hcr |= (hcr_el2 & HCR_TGE) != 0; 9585 9586 /* Perform a table-lookup for the target EL given the current state */ 9587 target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el]; 9588 9589 assert(target_el > 0); 9590 9591 return target_el; 9592 } 9593 9594 void arm_log_exception(int idx) 9595 { 9596 if (qemu_loglevel_mask(CPU_LOG_INT)) { 9597 const char *exc = NULL; 9598 static const char * const excnames[] = { 9599 [EXCP_UDEF] = "Undefined Instruction", 9600 [EXCP_SWI] = "SVC", 9601 [EXCP_PREFETCH_ABORT] = "Prefetch Abort", 9602 [EXCP_DATA_ABORT] = "Data Abort", 9603 [EXCP_IRQ] = "IRQ", 9604 [EXCP_FIQ] = "FIQ", 9605 [EXCP_BKPT] = "Breakpoint", 9606 [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit", 9607 [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage", 9608 [EXCP_HVC] = "Hypervisor Call", 9609 [EXCP_HYP_TRAP] = "Hypervisor Trap", 9610 [EXCP_SMC] = "Secure Monitor Call", 9611 [EXCP_VIRQ] = "Virtual IRQ", 9612 [EXCP_VFIQ] = "Virtual FIQ", 9613 [EXCP_SEMIHOST] = "Semihosting call", 9614 [EXCP_NOCP] = "v7M NOCP UsageFault", 9615 [EXCP_INVSTATE] = "v7M INVSTATE UsageFault", 9616 [EXCP_STKOF] = "v8M STKOF UsageFault", 9617 [EXCP_LAZYFP] = "v7M exception during lazy FP stacking", 9618 [EXCP_LSERR] = "v8M LSERR UsageFault", 9619 [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault", 9620 [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault", 9621 }; 9622 9623 if (idx >= 0 && idx < ARRAY_SIZE(excnames)) { 9624 exc = excnames[idx]; 9625 } 9626 if (!exc) { 9627 exc = "unknown"; 9628 } 9629 qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc); 9630 } 9631 } 9632 9633 /* 9634 * Function used to synchronize QEMU's AArch64 register set with AArch32 9635 * register set. This is necessary when switching between AArch32 and AArch64 9636 * execution state. 9637 */ 9638 void aarch64_sync_32_to_64(CPUARMState *env) 9639 { 9640 int i; 9641 uint32_t mode = env->uncached_cpsr & CPSR_M; 9642 9643 /* We can blanket copy R[0:7] to X[0:7] */ 9644 for (i = 0; i < 8; i++) { 9645 env->xregs[i] = env->regs[i]; 9646 } 9647 9648 /* 9649 * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12. 9650 * Otherwise, they come from the banked user regs. 9651 */ 9652 if (mode == ARM_CPU_MODE_FIQ) { 9653 for (i = 8; i < 13; i++) { 9654 env->xregs[i] = env->usr_regs[i - 8]; 9655 } 9656 } else { 9657 for (i = 8; i < 13; i++) { 9658 env->xregs[i] = env->regs[i]; 9659 } 9660 } 9661 9662 /* 9663 * Registers x13-x23 are the various mode SP and FP registers. Registers 9664 * r13 and r14 are only copied if we are in that mode, otherwise we copy 9665 * from the mode banked register. 
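 *
 * The fixed mapping established by the code below is:
 *   x13 = SP_usr,  x14 = LR_usr (Hyp mode shares LR_usr)
 *   x15 = SP_hyp
 *   x16 = LR_irq,  x17 = SP_irq
 *   x18 = LR_svc,  x19 = SP_svc
 *   x20 = LR_abt,  x21 = SP_abt
 *   x22 = LR_und,  x23 = SP_und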
9666 */ 9667 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 9668 env->xregs[13] = env->regs[13]; 9669 env->xregs[14] = env->regs[14]; 9670 } else { 9671 env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)]; 9672 /* HYP is an exception in that it is copied from r14 */ 9673 if (mode == ARM_CPU_MODE_HYP) { 9674 env->xregs[14] = env->regs[14]; 9675 } else { 9676 env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)]; 9677 } 9678 } 9679 9680 if (mode == ARM_CPU_MODE_HYP) { 9681 env->xregs[15] = env->regs[13]; 9682 } else { 9683 env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)]; 9684 } 9685 9686 if (mode == ARM_CPU_MODE_IRQ) { 9687 env->xregs[16] = env->regs[14]; 9688 env->xregs[17] = env->regs[13]; 9689 } else { 9690 env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)]; 9691 env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)]; 9692 } 9693 9694 if (mode == ARM_CPU_MODE_SVC) { 9695 env->xregs[18] = env->regs[14]; 9696 env->xregs[19] = env->regs[13]; 9697 } else { 9698 env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)]; 9699 env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)]; 9700 } 9701 9702 if (mode == ARM_CPU_MODE_ABT) { 9703 env->xregs[20] = env->regs[14]; 9704 env->xregs[21] = env->regs[13]; 9705 } else { 9706 env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)]; 9707 env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)]; 9708 } 9709 9710 if (mode == ARM_CPU_MODE_UND) { 9711 env->xregs[22] = env->regs[14]; 9712 env->xregs[23] = env->regs[13]; 9713 } else { 9714 env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)]; 9715 env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)]; 9716 } 9717 9718 /* 9719 * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 9720 * mode, then we can copy from r8-r14. Otherwise, we copy from the 9721 * FIQ bank for r8-r14. 9722 */ 9723 if (mode == ARM_CPU_MODE_FIQ) { 9724 for (i = 24; i < 31; i++) { 9725 env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */ 9726 } 9727 } else { 9728 for (i = 24; i < 29; i++) { 9729 env->xregs[i] = env->fiq_regs[i - 24]; 9730 } 9731 env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)]; 9732 env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)]; 9733 } 9734 9735 env->pc = env->regs[15]; 9736 } 9737 9738 /* 9739 * Function used to synchronize QEMU's AArch32 register set with AArch64 9740 * register set. This is necessary when switching between AArch32 and AArch64 9741 * execution state. 9742 */ 9743 void aarch64_sync_64_to_32(CPUARMState *env) 9744 { 9745 int i; 9746 uint32_t mode = env->uncached_cpsr & CPSR_M; 9747 9748 /* We can blanket copy X[0:7] to R[0:7] */ 9749 for (i = 0; i < 8; i++) { 9750 env->regs[i] = env->xregs[i]; 9751 } 9752 9753 /* 9754 * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12. 9755 * Otherwise, we copy x8-x12 into the banked user regs. 9756 */ 9757 if (mode == ARM_CPU_MODE_FIQ) { 9758 for (i = 8; i < 13; i++) { 9759 env->usr_regs[i - 8] = env->xregs[i]; 9760 } 9761 } else { 9762 for (i = 8; i < 13; i++) { 9763 env->regs[i] = env->xregs[i]; 9764 } 9765 } 9766 9767 /* 9768 * Registers r13 & r14 depend on the current mode. 9769 * If we are in a given mode, we copy the corresponding x registers to r13 9770 * and r14. Otherwise, we copy the x register to the banked r13 and r14 9771 * for the mode. 
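 * (This is the inverse of the x13-x23 mapping established by
 * aarch64_sync_32_to_64() above.)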
9772 */ 9773 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 9774 env->regs[13] = env->xregs[13]; 9775 env->regs[14] = env->xregs[14]; 9776 } else { 9777 env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13]; 9778 9779 /* 9780 * HYP is an exception in that it does not have its own banked r14 but 9781 * shares the USR r14 9782 */ 9783 if (mode == ARM_CPU_MODE_HYP) { 9784 env->regs[14] = env->xregs[14]; 9785 } else { 9786 env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14]; 9787 } 9788 } 9789 9790 if (mode == ARM_CPU_MODE_HYP) { 9791 env->regs[13] = env->xregs[15]; 9792 } else { 9793 env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15]; 9794 } 9795 9796 if (mode == ARM_CPU_MODE_IRQ) { 9797 env->regs[14] = env->xregs[16]; 9798 env->regs[13] = env->xregs[17]; 9799 } else { 9800 env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16]; 9801 env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17]; 9802 } 9803 9804 if (mode == ARM_CPU_MODE_SVC) { 9805 env->regs[14] = env->xregs[18]; 9806 env->regs[13] = env->xregs[19]; 9807 } else { 9808 env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18]; 9809 env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19]; 9810 } 9811 9812 if (mode == ARM_CPU_MODE_ABT) { 9813 env->regs[14] = env->xregs[20]; 9814 env->regs[13] = env->xregs[21]; 9815 } else { 9816 env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20]; 9817 env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21]; 9818 } 9819 9820 if (mode == ARM_CPU_MODE_UND) { 9821 env->regs[14] = env->xregs[22]; 9822 env->regs[13] = env->xregs[23]; 9823 } else { 9824 env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22]; 9825 env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23]; 9826 } 9827 9828 /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 9829 * mode, then we can copy to r8-r14. Otherwise, we copy to the 9830 * FIQ bank for r8-r14. 9831 */ 9832 if (mode == ARM_CPU_MODE_FIQ) { 9833 for (i = 24; i < 31; i++) { 9834 env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */ 9835 } 9836 } else { 9837 for (i = 24; i < 29; i++) { 9838 env->fiq_regs[i - 24] = env->xregs[i]; 9839 } 9840 env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29]; 9841 env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30]; 9842 } 9843 9844 env->regs[15] = env->pc; 9845 } 9846 9847 static void take_aarch32_exception(CPUARMState *env, int new_mode, 9848 uint32_t mask, uint32_t offset, 9849 uint32_t newpc) 9850 { 9851 int new_el; 9852 9853 /* Change the CPU state so as to actually take the exception. */ 9854 switch_mode(env, new_mode); 9855 9856 /* 9857 * For exceptions taken to AArch32 we must clear the SS bit in both 9858 * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now. 9859 */ 9860 env->pstate &= ~PSTATE_SS; 9861 env->spsr = cpsr_read(env); 9862 /* Clear IT bits. */ 9863 env->condexec_bits = 0; 9864 /* Switch to the new mode, and to the correct instruction set. */ 9865 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode; 9866 9867 /* This must be after mode switching. 
 */
    new_el = arm_current_el(env);

    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    /* J and IL must always be cleared for exception entry */
    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
    env->daif |= mask;

    if (cpu_isar_feature(aa32_ssbs, env_archcpu(env))) {
        if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) {
            env->uncached_cpsr |= CPSR_SSBS;
        } else {
            env->uncached_cpsr &= ~CPSR_SSBS;
        }
    }

    if (new_mode == ARM_CPU_MODE_HYP) {
        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
        env->elr_el[2] = env->regs[15];
    } else {
        /* CPSR.PAN is normally preserved unless...  */
        if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
            switch (new_el) {
            case 3:
                if (!arm_is_secure_below_el3(env)) {
                    /* ... the target is EL3, from non-secure state.  */
                    env->uncached_cpsr &= ~CPSR_PAN;
                    break;
                }
                /* ... the target is EL3, from secure state ... */
                /* fall through */
            case 1:
                /* ... the target is EL1 and SCTLR.SPAN is 0.  */
                if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
                    env->uncached_cpsr |= CPSR_PAN;
                }
                break;
            }
        }
        /*
         * this is a lie, as there was no c1_sys on V4T/V5, but who cares
         * and we should just guard the thumb mode on V4
         */
        if (arm_feature(env, ARM_FEATURE_V4T)) {
            env->thumb =
                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
        }
        env->regs[14] = env->regs[15] + offset;
    }
    env->regs[15] = newpc;
    arm_rebuild_hflags(env);
}

static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
{
    /*
     * Handle exception entry to Hyp mode; this is sufficiently
     * different to entry to other AArch32 modes that we handle it
     * separately here.
     *
     * The vector table entry used is always the 0x14 Hyp mode entry point,
     * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
     * The offset applied to the preferred return address is always zero
     * (see DDI0487C.a section G1.12.3).
     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
     */
    uint32_t addr, mask;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (cs->exception_index) {
    case EXCP_UDEF:
        addr = 0x04;
        break;
    case EXCP_SWI:
        addr = 0x14;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort. */
    case EXCP_PREFETCH_ABORT:
        env->cp15.ifar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x0c;
        break;
    case EXCP_DATA_ABORT:
        env->cp15.dfar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x10;
        break;
    case EXCP_IRQ:
        addr = 0x18;
        break;
    case EXCP_FIQ:
        addr = 0x1c;
        break;
    case EXCP_HVC:
        addr = 0x08;
        break;
    case EXCP_HYP_TRAP:
        addr = 0x14;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            /*
             * QEMU syndrome values are v8-style. v7 has the IL bit
             * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
9983 * If this is a v7 CPU, squash the IL bit in those cases. 9984 */ 9985 if (cs->exception_index == EXCP_PREFETCH_ABORT || 9986 (cs->exception_index == EXCP_DATA_ABORT && 9987 !(env->exception.syndrome & ARM_EL_ISV)) || 9988 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) { 9989 env->exception.syndrome &= ~ARM_EL_IL; 9990 } 9991 } 9992 env->cp15.esr_el[2] = env->exception.syndrome; 9993 } 9994 9995 if (arm_current_el(env) != 2 && addr < 0x14) { 9996 addr = 0x14; 9997 } 9998 9999 mask = 0; 10000 if (!(env->cp15.scr_el3 & SCR_EA)) { 10001 mask |= CPSR_A; 10002 } 10003 if (!(env->cp15.scr_el3 & SCR_IRQ)) { 10004 mask |= CPSR_I; 10005 } 10006 if (!(env->cp15.scr_el3 & SCR_FIQ)) { 10007 mask |= CPSR_F; 10008 } 10009 10010 addr += env->cp15.hvbar; 10011 10012 take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr); 10013 } 10014 10015 static void arm_cpu_do_interrupt_aarch32(CPUState *cs) 10016 { 10017 ARMCPU *cpu = ARM_CPU(cs); 10018 CPUARMState *env = &cpu->env; 10019 uint32_t addr; 10020 uint32_t mask; 10021 int new_mode; 10022 uint32_t offset; 10023 uint32_t moe; 10024 10025 /* If this is a debug exception we must update the DBGDSCR.MOE bits */ 10026 switch (syn_get_ec(env->exception.syndrome)) { 10027 case EC_BREAKPOINT: 10028 case EC_BREAKPOINT_SAME_EL: 10029 moe = 1; 10030 break; 10031 case EC_WATCHPOINT: 10032 case EC_WATCHPOINT_SAME_EL: 10033 moe = 10; 10034 break; 10035 case EC_AA32_BKPT: 10036 moe = 3; 10037 break; 10038 case EC_VECTORCATCH: 10039 moe = 5; 10040 break; 10041 default: 10042 moe = 0; 10043 break; 10044 } 10045 10046 if (moe) { 10047 env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe); 10048 } 10049 10050 if (env->exception.target_el == 2) { 10051 arm_cpu_do_interrupt_aarch32_hyp(cs); 10052 return; 10053 } 10054 10055 switch (cs->exception_index) { 10056 case EXCP_UDEF: 10057 new_mode = ARM_CPU_MODE_UND; 10058 addr = 0x04; 10059 mask = CPSR_I; 10060 if (env->thumb) 10061 offset = 2; 10062 else 10063 offset = 4; 10064 break; 10065 case EXCP_SWI: 10066 new_mode = ARM_CPU_MODE_SVC; 10067 addr = 0x08; 10068 mask = CPSR_I; 10069 /* The PC already points to the next instruction. */ 10070 offset = 0; 10071 break; 10072 case EXCP_BKPT: 10073 /* Fall through to prefetch abort. */ 10074 case EXCP_PREFETCH_ABORT: 10075 A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr); 10076 A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress); 10077 qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n", 10078 env->exception.fsr, (uint32_t)env->exception.vaddress); 10079 new_mode = ARM_CPU_MODE_ABT; 10080 addr = 0x0c; 10081 mask = CPSR_A | CPSR_I; 10082 offset = 4; 10083 break; 10084 case EXCP_DATA_ABORT: 10085 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr); 10086 A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress); 10087 qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n", 10088 env->exception.fsr, 10089 (uint32_t)env->exception.vaddress); 10090 new_mode = ARM_CPU_MODE_ABT; 10091 addr = 0x10; 10092 mask = CPSR_A | CPSR_I; 10093 offset = 8; 10094 break; 10095 case EXCP_IRQ: 10096 new_mode = ARM_CPU_MODE_IRQ; 10097 addr = 0x18; 10098 /* Disable IRQ and imprecise data aborts. */ 10099 mask = CPSR_A | CPSR_I; 10100 offset = 4; 10101 if (env->cp15.scr_el3 & SCR_IRQ) { 10102 /* IRQ routed to monitor mode */ 10103 new_mode = ARM_CPU_MODE_MON; 10104 mask |= CPSR_F; 10105 } 10106 break; 10107 case EXCP_FIQ: 10108 new_mode = ARM_CPU_MODE_FIQ; 10109 addr = 0x1c; 10110 /* Disable FIQ, IRQ and imprecise data aborts. 
*/ 10111 mask = CPSR_A | CPSR_I | CPSR_F; 10112 if (env->cp15.scr_el3 & SCR_FIQ) { 10113 /* FIQ routed to monitor mode */ 10114 new_mode = ARM_CPU_MODE_MON; 10115 } 10116 offset = 4; 10117 break; 10118 case EXCP_VIRQ: 10119 new_mode = ARM_CPU_MODE_IRQ; 10120 addr = 0x18; 10121 /* Disable IRQ and imprecise data aborts. */ 10122 mask = CPSR_A | CPSR_I; 10123 offset = 4; 10124 break; 10125 case EXCP_VFIQ: 10126 new_mode = ARM_CPU_MODE_FIQ; 10127 addr = 0x1c; 10128 /* Disable FIQ, IRQ and imprecise data aborts. */ 10129 mask = CPSR_A | CPSR_I | CPSR_F; 10130 offset = 4; 10131 break; 10132 case EXCP_SMC: 10133 new_mode = ARM_CPU_MODE_MON; 10134 addr = 0x08; 10135 mask = CPSR_A | CPSR_I | CPSR_F; 10136 offset = 0; 10137 break; 10138 default: 10139 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 10140 return; /* Never happens. Keep compiler happy. */ 10141 } 10142 10143 if (new_mode == ARM_CPU_MODE_MON) { 10144 addr += env->cp15.mvbar; 10145 } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) { 10146 /* High vectors. When enabled, base address cannot be remapped. */ 10147 addr += 0xffff0000; 10148 } else { 10149 /* ARM v7 architectures provide a vector base address register to remap 10150 * the interrupt vector table. 10151 * This register is only followed in non-monitor mode, and is banked. 10152 * Note: only bits 31:5 are valid. 10153 */ 10154 addr += A32_BANKED_CURRENT_REG_GET(env, vbar); 10155 } 10156 10157 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { 10158 env->cp15.scr_el3 &= ~SCR_NS; 10159 } 10160 10161 take_aarch32_exception(env, new_mode, mask, offset, addr); 10162 } 10163 10164 static int aarch64_regnum(CPUARMState *env, int aarch32_reg) 10165 { 10166 /* 10167 * Return the register number of the AArch64 view of the AArch32 10168 * register @aarch32_reg. The CPUARMState CPSR is assumed to still 10169 * be that of the AArch32 mode the exception came from. 10170 */ 10171 int mode = env->uncached_cpsr & CPSR_M; 10172 10173 switch (aarch32_reg) { 10174 case 0 ... 7: 10175 return aarch32_reg; 10176 case 8 ... 12: 10177 return mode == ARM_CPU_MODE_FIQ ? 
aarch32_reg + 16 : aarch32_reg; 10178 case 13: 10179 switch (mode) { 10180 case ARM_CPU_MODE_USR: 10181 case ARM_CPU_MODE_SYS: 10182 return 13; 10183 case ARM_CPU_MODE_HYP: 10184 return 15; 10185 case ARM_CPU_MODE_IRQ: 10186 return 17; 10187 case ARM_CPU_MODE_SVC: 10188 return 19; 10189 case ARM_CPU_MODE_ABT: 10190 return 21; 10191 case ARM_CPU_MODE_UND: 10192 return 23; 10193 case ARM_CPU_MODE_FIQ: 10194 return 29; 10195 default: 10196 g_assert_not_reached(); 10197 } 10198 case 14: 10199 switch (mode) { 10200 case ARM_CPU_MODE_USR: 10201 case ARM_CPU_MODE_SYS: 10202 case ARM_CPU_MODE_HYP: 10203 return 14; 10204 case ARM_CPU_MODE_IRQ: 10205 return 16; 10206 case ARM_CPU_MODE_SVC: 10207 return 18; 10208 case ARM_CPU_MODE_ABT: 10209 return 20; 10210 case ARM_CPU_MODE_UND: 10211 return 22; 10212 case ARM_CPU_MODE_FIQ: 10213 return 30; 10214 default: 10215 g_assert_not_reached(); 10216 } 10217 case 15: 10218 return 31; 10219 default: 10220 g_assert_not_reached(); 10221 } 10222 } 10223 10224 static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env) 10225 { 10226 uint32_t ret = cpsr_read(env); 10227 10228 /* Move DIT to the correct location for SPSR_ELx */ 10229 if (ret & CPSR_DIT) { 10230 ret &= ~CPSR_DIT; 10231 ret |= PSTATE_DIT; 10232 } 10233 /* Merge PSTATE.SS into SPSR_ELx */ 10234 ret |= env->pstate & PSTATE_SS; 10235 10236 return ret; 10237 } 10238 10239 /* Handle exception entry to a target EL which is using AArch64 */ 10240 static void arm_cpu_do_interrupt_aarch64(CPUState *cs) 10241 { 10242 ARMCPU *cpu = ARM_CPU(cs); 10243 CPUARMState *env = &cpu->env; 10244 unsigned int new_el = env->exception.target_el; 10245 target_ulong addr = env->cp15.vbar_el[new_el]; 10246 unsigned int new_mode = aarch64_pstate_mode(new_el, true); 10247 unsigned int old_mode; 10248 unsigned int cur_el = arm_current_el(env); 10249 int rt; 10250 10251 /* 10252 * Note that new_el can never be 0. If cur_el is 0, then 10253 * el0_a64 is is_a64(), else el0_a64 is ignored. 10254 */ 10255 aarch64_sve_change_el(env, cur_el, new_el, is_a64(env)); 10256 10257 if (cur_el < new_el) { 10258 /* Entry vector offset depends on whether the implemented EL 10259 * immediately lower than the target level is using AArch32 or AArch64 10260 */ 10261 bool is_aa64; 10262 uint64_t hcr; 10263 10264 switch (new_el) { 10265 case 3: 10266 is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0; 10267 break; 10268 case 2: 10269 hcr = arm_hcr_el2_eff(env); 10270 if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { 10271 is_aa64 = (hcr & HCR_RW) != 0; 10272 break; 10273 } 10274 /* fall through */ 10275 case 1: 10276 is_aa64 = is_a64(env); 10277 break; 10278 default: 10279 g_assert_not_reached(); 10280 } 10281 10282 if (is_aa64) { 10283 addr += 0x400; 10284 } else { 10285 addr += 0x600; 10286 } 10287 } else if (pstate_read(env) & PSTATE_SP) { 10288 addr += 0x200; 10289 } 10290 10291 switch (cs->exception_index) { 10292 case EXCP_PREFETCH_ABORT: 10293 case EXCP_DATA_ABORT: 10294 env->cp15.far_el[new_el] = env->exception.vaddress; 10295 qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n", 10296 env->cp15.far_el[new_el]); 10297 /* fall through */ 10298 case EXCP_BKPT: 10299 case EXCP_UDEF: 10300 case EXCP_SWI: 10301 case EXCP_HVC: 10302 case EXCP_HYP_TRAP: 10303 case EXCP_SMC: 10304 switch (syn_get_ec(env->exception.syndrome)) { 10305 case EC_ADVSIMDFPACCESSTRAP: 10306 /* 10307 * QEMU internal FP/SIMD syndromes from AArch32 include the 10308 * TA and coproc fields which are only exposed if the exception 10309 * is taken to AArch32 Hyp mode. 
Mask them out to get a valid 10310 * AArch64 format syndrome. 10311 */ 10312 env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20); 10313 break; 10314 case EC_CP14RTTRAP: 10315 case EC_CP15RTTRAP: 10316 case EC_CP14DTTRAP: 10317 /* 10318 * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently 10319 * the raw register field from the insn; when taking this to 10320 * AArch64 we must convert it to the AArch64 view of the register 10321 * number. Notice that we read a 4-bit AArch32 register number and 10322 * write back a 5-bit AArch64 one. 10323 */ 10324 rt = extract32(env->exception.syndrome, 5, 4); 10325 rt = aarch64_regnum(env, rt); 10326 env->exception.syndrome = deposit32(env->exception.syndrome, 10327 5, 5, rt); 10328 break; 10329 case EC_CP15RRTTRAP: 10330 case EC_CP14RRTTRAP: 10331 /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */ 10332 rt = extract32(env->exception.syndrome, 5, 4); 10333 rt = aarch64_regnum(env, rt); 10334 env->exception.syndrome = deposit32(env->exception.syndrome, 10335 5, 5, rt); 10336 rt = extract32(env->exception.syndrome, 10, 4); 10337 rt = aarch64_regnum(env, rt); 10338 env->exception.syndrome = deposit32(env->exception.syndrome, 10339 10, 5, rt); 10340 break; 10341 } 10342 env->cp15.esr_el[new_el] = env->exception.syndrome; 10343 break; 10344 case EXCP_IRQ: 10345 case EXCP_VIRQ: 10346 addr += 0x80; 10347 break; 10348 case EXCP_FIQ: 10349 case EXCP_VFIQ: 10350 addr += 0x100; 10351 break; 10352 default: 10353 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 10354 } 10355 10356 if (is_a64(env)) { 10357 old_mode = pstate_read(env); 10358 aarch64_save_sp(env, arm_current_el(env)); 10359 env->elr_el[new_el] = env->pc; 10360 } else { 10361 old_mode = cpsr_read_for_spsr_elx(env); 10362 env->elr_el[new_el] = env->regs[15]; 10363 10364 aarch64_sync_32_to_64(env); 10365 10366 env->condexec_bits = 0; 10367 } 10368 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode; 10369 10370 qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n", 10371 env->elr_el[new_el]); 10372 10373 if (cpu_isar_feature(aa64_pan, cpu)) { 10374 /* The value of PSTATE.PAN is normally preserved, except when ... */ 10375 new_mode |= old_mode & PSTATE_PAN; 10376 switch (new_el) { 10377 case 2: 10378 /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ... */ 10379 if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) 10380 != (HCR_E2H | HCR_TGE)) { 10381 break; 10382 } 10383 /* fall through */ 10384 case 1: 10385 /* ... the target is EL1 ... */ 10386 /* ... and SCTLR_ELx.SPAN == 0, then set to 1. */ 10387 if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) { 10388 new_mode |= PSTATE_PAN; 10389 } 10390 break; 10391 } 10392 } 10393 if (cpu_isar_feature(aa64_mte, cpu)) { 10394 new_mode |= PSTATE_TCO; 10395 } 10396 10397 if (cpu_isar_feature(aa64_ssbs, cpu)) { 10398 if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) { 10399 new_mode |= PSTATE_SSBS; 10400 } else { 10401 new_mode &= ~PSTATE_SSBS; 10402 } 10403 } 10404 10405 pstate_write(env, PSTATE_DAIF | new_mode); 10406 env->aarch64 = 1; 10407 aarch64_restore_sp(env, new_el); 10408 helper_rebuild_hflags_a64(env, new_el); 10409 10410 env->pc = addr; 10411 10412 qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n", 10413 new_el, env->pc, pstate_read(env)); 10414 } 10415 10416 /* 10417 * Do semihosting call and set the appropriate return value. All the 10418 * permission and validity checks have been done at translate time. 
 *
 * We only see semihosting exceptions in TCG, as they are not
 * trapped to the hypervisor in KVM.
 */
#ifdef CONFIG_TCG
static void handle_semihosting(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        env->xregs[0] = do_common_semihosting(cs);
        env->pc += 4;
    } else {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        env->regs[0] = do_common_semihosting(cs);
        env->regs[15] += env->thumb ? 2 : 4;
    }
}
#endif

/* Handle a CPU exception for A and R profile CPUs.
 * Do any appropriate logging, handle PSCI calls, and then hand off
 * to the AArch64-entry or AArch32-entry function depending on the
 * target exception level's register width.
 *
 * Note: this is used for both TCG (as the do_interrupt tcg op),
 *       and KVM to re-inject guest debug exceptions, and to
 *       inject a Synchronous-External-Abort.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      syn_get_ec(env->exception.syndrome),
                      env->exception.syndrome);
    }

    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /*
     * Semihosting semantics depend on the register width of the code
     * that caused the exception, not the target exception level, so
     * must be handled here.
     */
#ifdef CONFIG_TCG
    if (cs->exception_index == EXCP_SEMIHOST) {
        handle_semihosting(cs);
        return;
    }
#endif

    /* Hooks may change global state so BQL should be held, also the
     * BQL needs to be held for any modification of
     * cs->interrupt_request.
     */
    g_assert(qemu_mutex_iothread_locked());

    arm_call_pre_el_change_hook(cpu);

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    arm_call_el_change_hook(cpu);

    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}
#endif /* !CONFIG_USER_ONLY */

uint64_t arm_sctlr(CPUARMState *env, int el)
{
    /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
    if (el == 0) {
        ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
        el = (mmu_idx == ARMMMUIdx_E20_0 || mmu_idx == ARMMMUIdx_SE20_0)
            ?
2 : 1; 10520 } 10521 return env->cp15.sctlr_el[el]; 10522 } 10523 10524 /* Return the SCTLR value which controls this address translation regime */ 10525 static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx) 10526 { 10527 return env->cp15.sctlr_el[regime_el(env, mmu_idx)]; 10528 } 10529 10530 #ifndef CONFIG_USER_ONLY 10531 10532 /* Return true if the specified stage of address translation is disabled */ 10533 static inline bool regime_translation_disabled(CPUARMState *env, 10534 ARMMMUIdx mmu_idx) 10535 { 10536 uint64_t hcr_el2; 10537 10538 if (arm_feature(env, ARM_FEATURE_M)) { 10539 switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] & 10540 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) { 10541 case R_V7M_MPU_CTRL_ENABLE_MASK: 10542 /* Enabled, but not for HardFault and NMI */ 10543 return mmu_idx & ARM_MMU_IDX_M_NEGPRI; 10544 case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK: 10545 /* Enabled for all cases */ 10546 return false; 10547 case 0: 10548 default: 10549 /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but 10550 * we warned about that in armv7m_nvic.c when the guest set it. 10551 */ 10552 return true; 10553 } 10554 } 10555 10556 hcr_el2 = arm_hcr_el2_eff(env); 10557 10558 if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) { 10559 /* HCR.DC means HCR.VM behaves as 1 */ 10560 return (hcr_el2 & (HCR_DC | HCR_VM)) == 0; 10561 } 10562 10563 if (hcr_el2 & HCR_TGE) { 10564 /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */ 10565 if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) { 10566 return true; 10567 } 10568 } 10569 10570 if ((hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) { 10571 /* HCR.DC means SCTLR_EL1.M behaves as 0 */ 10572 return true; 10573 } 10574 10575 return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0; 10576 } 10577 10578 static inline bool regime_translation_big_endian(CPUARMState *env, 10579 ARMMMUIdx mmu_idx) 10580 { 10581 return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0; 10582 } 10583 10584 /* Return the TTBR associated with this translation regime */ 10585 static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, 10586 int ttbrn) 10587 { 10588 if (mmu_idx == ARMMMUIdx_Stage2) { 10589 return env->cp15.vttbr_el2; 10590 } 10591 if (mmu_idx == ARMMMUIdx_Stage2_S) { 10592 return env->cp15.vsttbr_el2; 10593 } 10594 if (ttbrn == 0) { 10595 return env->cp15.ttbr0_el[regime_el(env, mmu_idx)]; 10596 } else { 10597 return env->cp15.ttbr1_el[regime_el(env, mmu_idx)]; 10598 } 10599 } 10600 10601 #endif /* !CONFIG_USER_ONLY */ 10602 10603 /* Convert a possible stage1+2 MMU index into the appropriate 10604 * stage 1 MMU index 10605 */ 10606 static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx) 10607 { 10608 switch (mmu_idx) { 10609 case ARMMMUIdx_SE10_0: 10610 return ARMMMUIdx_Stage1_SE0; 10611 case ARMMMUIdx_SE10_1: 10612 return ARMMMUIdx_Stage1_SE1; 10613 case ARMMMUIdx_SE10_1_PAN: 10614 return ARMMMUIdx_Stage1_SE1_PAN; 10615 case ARMMMUIdx_E10_0: 10616 return ARMMMUIdx_Stage1_E0; 10617 case ARMMMUIdx_E10_1: 10618 return ARMMMUIdx_Stage1_E1; 10619 case ARMMMUIdx_E10_1_PAN: 10620 return ARMMMUIdx_Stage1_E1_PAN; 10621 default: 10622 return mmu_idx; 10623 } 10624 } 10625 10626 /* Return true if the translation regime is using LPAE format page tables */ 10627 static inline bool regime_using_lpae_format(CPUARMState *env, 10628 ARMMMUIdx mmu_idx) 10629 { 10630 int el = regime_el(env, mmu_idx); 10631 if (el == 2 || arm_el_is_aa64(env, el)) { 10632 return 
true; 10633 } 10634 if (arm_feature(env, ARM_FEATURE_LPAE) 10635 && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) { 10636 return true; 10637 } 10638 return false; 10639 } 10640 10641 /* Returns true if the stage 1 translation regime is using LPAE format page 10642 * tables. Used when raising alignment exceptions, whose FSR changes depending 10643 * on whether the long or short descriptor format is in use. */ 10644 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx) 10645 { 10646 mmu_idx = stage_1_mmu_idx(mmu_idx); 10647 10648 return regime_using_lpae_format(env, mmu_idx); 10649 } 10650 10651 #ifndef CONFIG_USER_ONLY 10652 static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) 10653 { 10654 switch (mmu_idx) { 10655 case ARMMMUIdx_SE10_0: 10656 case ARMMMUIdx_E20_0: 10657 case ARMMMUIdx_SE20_0: 10658 case ARMMMUIdx_Stage1_E0: 10659 case ARMMMUIdx_Stage1_SE0: 10660 case ARMMMUIdx_MUser: 10661 case ARMMMUIdx_MSUser: 10662 case ARMMMUIdx_MUserNegPri: 10663 case ARMMMUIdx_MSUserNegPri: 10664 return true; 10665 default: 10666 return false; 10667 case ARMMMUIdx_E10_0: 10668 case ARMMMUIdx_E10_1: 10669 case ARMMMUIdx_E10_1_PAN: 10670 g_assert_not_reached(); 10671 } 10672 } 10673 10674 /* Translate section/page access permissions to page 10675 * R/W protection flags 10676 * 10677 * @env: CPUARMState 10678 * @mmu_idx: MMU index indicating required translation regime 10679 * @ap: The 3-bit access permissions (AP[2:0]) 10680 * @domain_prot: The 2-bit domain access permissions 10681 */ 10682 static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, 10683 int ap, int domain_prot) 10684 { 10685 bool is_user = regime_is_user(env, mmu_idx); 10686 10687 if (domain_prot == 3) { 10688 return PAGE_READ | PAGE_WRITE; 10689 } 10690 10691 switch (ap) { 10692 case 0: 10693 if (arm_feature(env, ARM_FEATURE_V7)) { 10694 return 0; 10695 } 10696 switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) { 10697 case SCTLR_S: 10698 return is_user ? 0 : PAGE_READ; 10699 case SCTLR_R: 10700 return PAGE_READ; 10701 default: 10702 return 0; 10703 } 10704 case 1: 10705 return is_user ? 0 : PAGE_READ | PAGE_WRITE; 10706 case 2: 10707 if (is_user) { 10708 return PAGE_READ; 10709 } else { 10710 return PAGE_READ | PAGE_WRITE; 10711 } 10712 case 3: 10713 return PAGE_READ | PAGE_WRITE; 10714 case 4: /* Reserved. */ 10715 return 0; 10716 case 5: 10717 return is_user ? 0 : PAGE_READ; 10718 case 6: 10719 return PAGE_READ; 10720 case 7: 10721 if (!arm_feature(env, ARM_FEATURE_V6K)) { 10722 return 0; 10723 } 10724 return PAGE_READ; 10725 default: 10726 g_assert_not_reached(); 10727 } 10728 } 10729 10730 /* Translate section/page access permissions to page 10731 * R/W protection flags. 10732 * 10733 * @ap: The 2-bit simple AP (AP[2:1]) 10734 * @is_user: TRUE if accessing from PL0 10735 */ 10736 static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user) 10737 { 10738 switch (ap) { 10739 case 0: 10740 return is_user ? 0 : PAGE_READ | PAGE_WRITE; 10741 case 1: 10742 return PAGE_READ | PAGE_WRITE; 10743 case 2: 10744 return is_user ? 
0 : PAGE_READ; 10745 case 3: 10746 return PAGE_READ; 10747 default: 10748 g_assert_not_reached(); 10749 } 10750 } 10751 10752 static inline int 10753 simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap) 10754 { 10755 return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx)); 10756 } 10757 10758 /* Translate S2 section/page access permissions to protection flags 10759 * 10760 * @env: CPUARMState 10761 * @s2ap: The 2-bit stage2 access permissions (S2AP) 10762 * @xn: XN (execute-never) bits 10763 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0 10764 */ 10765 static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0) 10766 { 10767 int prot = 0; 10768 10769 if (s2ap & 1) { 10770 prot |= PAGE_READ; 10771 } 10772 if (s2ap & 2) { 10773 prot |= PAGE_WRITE; 10774 } 10775 10776 if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) { 10777 switch (xn) { 10778 case 0: 10779 prot |= PAGE_EXEC; 10780 break; 10781 case 1: 10782 if (s1_is_el0) { 10783 prot |= PAGE_EXEC; 10784 } 10785 break; 10786 case 2: 10787 break; 10788 case 3: 10789 if (!s1_is_el0) { 10790 prot |= PAGE_EXEC; 10791 } 10792 break; 10793 default: 10794 g_assert_not_reached(); 10795 } 10796 } else { 10797 if (!extract32(xn, 1, 1)) { 10798 if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) { 10799 prot |= PAGE_EXEC; 10800 } 10801 } 10802 } 10803 return prot; 10804 } 10805 10806 /* Translate section/page access permissions to protection flags 10807 * 10808 * @env: CPUARMState 10809 * @mmu_idx: MMU index indicating required translation regime 10810 * @is_aa64: TRUE if AArch64 10811 * @ap: The 2-bit simple AP (AP[2:1]) 10812 * @ns: NS (non-secure) bit 10813 * @xn: XN (execute-never) bit 10814 * @pxn: PXN (privileged execute-never) bit 10815 */ 10816 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, 10817 int ap, int ns, int xn, int pxn) 10818 { 10819 bool is_user = regime_is_user(env, mmu_idx); 10820 int prot_rw, user_rw; 10821 bool have_wxn; 10822 int wxn = 0; 10823 10824 assert(mmu_idx != ARMMMUIdx_Stage2); 10825 assert(mmu_idx != ARMMMUIdx_Stage2_S); 10826 10827 user_rw = simple_ap_to_rw_prot_is_user(ap, true); 10828 if (is_user) { 10829 prot_rw = user_rw; 10830 } else { 10831 if (user_rw && regime_is_pan(env, mmu_idx)) { 10832 /* PAN forbids data accesses but doesn't affect insn fetch */ 10833 prot_rw = 0; 10834 } else { 10835 prot_rw = simple_ap_to_rw_prot_is_user(ap, false); 10836 } 10837 } 10838 10839 if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) { 10840 return prot_rw; 10841 } 10842 10843 /* TODO have_wxn should be replaced with 10844 * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2) 10845 * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE 10846 * compatible processors have EL2, which is required for [U]WXN. 
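 *
 * WXN makes any writable page execute-never; UWXN (AArch32 only) makes
 * any page that is writable at EL0 execute-never for privileged code.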
10847 */ 10848 have_wxn = arm_feature(env, ARM_FEATURE_LPAE); 10849 10850 if (have_wxn) { 10851 wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN; 10852 } 10853 10854 if (is_aa64) { 10855 if (regime_has_2_ranges(mmu_idx) && !is_user) { 10856 xn = pxn || (user_rw & PAGE_WRITE); 10857 } 10858 } else if (arm_feature(env, ARM_FEATURE_V7)) { 10859 switch (regime_el(env, mmu_idx)) { 10860 case 1: 10861 case 3: 10862 if (is_user) { 10863 xn = xn || !(user_rw & PAGE_READ); 10864 } else { 10865 int uwxn = 0; 10866 if (have_wxn) { 10867 uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN; 10868 } 10869 xn = xn || !(prot_rw & PAGE_READ) || pxn || 10870 (uwxn && (user_rw & PAGE_WRITE)); 10871 } 10872 break; 10873 case 2: 10874 break; 10875 } 10876 } else { 10877 xn = wxn = 0; 10878 } 10879 10880 if (xn || (wxn && (prot_rw & PAGE_WRITE))) { 10881 return prot_rw; 10882 } 10883 return prot_rw | PAGE_EXEC; 10884 } 10885 10886 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, 10887 uint32_t *table, uint32_t address) 10888 { 10889 /* Note that we can only get here for an AArch32 PL0/PL1 lookup */ 10890 TCR *tcr = regime_tcr(env, mmu_idx); 10891 10892 if (address & tcr->mask) { 10893 if (tcr->raw_tcr & TTBCR_PD1) { 10894 /* Translation table walk disabled for TTBR1 */ 10895 return false; 10896 } 10897 *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000; 10898 } else { 10899 if (tcr->raw_tcr & TTBCR_PD0) { 10900 /* Translation table walk disabled for TTBR0 */ 10901 return false; 10902 } 10903 *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask; 10904 } 10905 *table |= (address >> 18) & 0x3ffc; 10906 return true; 10907 } 10908 10909 /* Translate a S1 pagetable walk through S2 if needed. */ 10910 static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx, 10911 hwaddr addr, bool *is_secure, 10912 ARMMMUFaultInfo *fi) 10913 { 10914 if (arm_mmu_idx_is_stage1_of_2(mmu_idx) && 10915 !regime_translation_disabled(env, ARMMMUIdx_Stage2)) { 10916 target_ulong s2size; 10917 hwaddr s2pa; 10918 int s2prot; 10919 int ret; 10920 ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S 10921 : ARMMMUIdx_Stage2; 10922 ARMCacheAttrs cacheattrs = {}; 10923 MemTxAttrs txattrs = {}; 10924 10925 ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx, false, 10926 &s2pa, &txattrs, &s2prot, &s2size, fi, 10927 &cacheattrs); 10928 if (ret) { 10929 assert(fi->type != ARMFault_None); 10930 fi->s2addr = addr; 10931 fi->stage2 = true; 10932 fi->s1ptw = true; 10933 fi->s1ns = !*is_secure; 10934 return ~0; 10935 } 10936 if ((arm_hcr_el2_eff(env) & HCR_PTW) && 10937 (cacheattrs.attrs & 0xf0) == 0) { 10938 /* 10939 * PTW set and S1 walk touched S2 Device memory: 10940 * generate Permission fault. 10941 */ 10942 fi->type = ARMFault_Permission; 10943 fi->s2addr = addr; 10944 fi->stage2 = true; 10945 fi->s1ptw = true; 10946 fi->s1ns = !*is_secure; 10947 return ~0; 10948 } 10949 10950 if (arm_is_secure_below_el3(env)) { 10951 /* Check if page table walk is to secure or non-secure PA space. */ 10952 if (*is_secure) { 10953 *is_secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW); 10954 } else { 10955 *is_secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW); 10956 } 10957 } else { 10958 assert(!*is_secure); 10959 } 10960 10961 addr = s2pa; 10962 } 10963 return addr; 10964 } 10965 10966 /* All loads done in the course of a page table walk go through here. 
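 * The descriptor address is first run through S1_ptw_translate(), so a
 * stage 1 walk can itself fault on its stage 2 translation; any fault is
 * recorded in *fi and 0 is returned.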
*/ 10967 static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure, 10968 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) 10969 { 10970 ARMCPU *cpu = ARM_CPU(cs); 10971 CPUARMState *env = &cpu->env; 10972 MemTxAttrs attrs = {}; 10973 MemTxResult result = MEMTX_OK; 10974 AddressSpace *as; 10975 uint32_t data; 10976 10977 addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi); 10978 attrs.secure = is_secure; 10979 as = arm_addressspace(cs, attrs); 10980 if (fi->s1ptw) { 10981 return 0; 10982 } 10983 if (regime_translation_big_endian(env, mmu_idx)) { 10984 data = address_space_ldl_be(as, addr, attrs, &result); 10985 } else { 10986 data = address_space_ldl_le(as, addr, attrs, &result); 10987 } 10988 if (result == MEMTX_OK) { 10989 return data; 10990 } 10991 fi->type = ARMFault_SyncExternalOnWalk; 10992 fi->ea = arm_extabort_type(result); 10993 return 0; 10994 } 10995 10996 static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure, 10997 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) 10998 { 10999 ARMCPU *cpu = ARM_CPU(cs); 11000 CPUARMState *env = &cpu->env; 11001 MemTxAttrs attrs = {}; 11002 MemTxResult result = MEMTX_OK; 11003 AddressSpace *as; 11004 uint64_t data; 11005 11006 addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi); 11007 attrs.secure = is_secure; 11008 as = arm_addressspace(cs, attrs); 11009 if (fi->s1ptw) { 11010 return 0; 11011 } 11012 if (regime_translation_big_endian(env, mmu_idx)) { 11013 data = address_space_ldq_be(as, addr, attrs, &result); 11014 } else { 11015 data = address_space_ldq_le(as, addr, attrs, &result); 11016 } 11017 if (result == MEMTX_OK) { 11018 return data; 11019 } 11020 fi->type = ARMFault_SyncExternalOnWalk; 11021 fi->ea = arm_extabort_type(result); 11022 return 0; 11023 } 11024 11025 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address, 11026 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11027 hwaddr *phys_ptr, int *prot, 11028 target_ulong *page_size, 11029 ARMMMUFaultInfo *fi) 11030 { 11031 CPUState *cs = env_cpu(env); 11032 int level = 1; 11033 uint32_t table; 11034 uint32_t desc; 11035 int type; 11036 int ap; 11037 int domain = 0; 11038 int domain_prot; 11039 hwaddr phys_addr; 11040 uint32_t dacr; 11041 11042 /* Pagetable walk. */ 11043 /* Lookup l1 descriptor. */ 11044 if (!get_level1_table_address(env, mmu_idx, &table, address)) { 11045 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 11046 fi->type = ARMFault_Translation; 11047 goto do_fault; 11048 } 11049 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 11050 mmu_idx, fi); 11051 if (fi->type != ARMFault_None) { 11052 goto do_fault; 11053 } 11054 type = (desc & 3); 11055 domain = (desc >> 5) & 0x0f; 11056 if (regime_el(env, mmu_idx) == 1) { 11057 dacr = env->cp15.dacr_ns; 11058 } else { 11059 dacr = env->cp15.dacr_s; 11060 } 11061 domain_prot = (dacr >> (domain * 2)) & 3; 11062 if (type == 0) { 11063 /* Section translation fault. */ 11064 fi->type = ARMFault_Translation; 11065 goto do_fault; 11066 } 11067 if (type != 2) { 11068 level = 2; 11069 } 11070 if (domain_prot == 0 || domain_prot == 2) { 11071 fi->type = ARMFault_Domain; 11072 goto do_fault; 11073 } 11074 if (type == 2) { 11075 /* 1Mb section. */ 11076 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 11077 ap = (desc >> 10) & 3; 11078 *page_size = 1024 * 1024; 11079 } else { 11080 /* Lookup l2 entry. */ 11081 if (type == 1) { 11082 /* Coarse pagetable. */ 11083 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 11084 } else { 11085 /* Fine pagetable. 
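 * A fine page table has 1024 entries, each describing 1KB,
 * so it is indexed by VA[19:10].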
*/ 11086 table = (desc & 0xfffff000) | ((address >> 8) & 0xffc); 11087 } 11088 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 11089 mmu_idx, fi); 11090 if (fi->type != ARMFault_None) { 11091 goto do_fault; 11092 } 11093 switch (desc & 3) { 11094 case 0: /* Page translation fault. */ 11095 fi->type = ARMFault_Translation; 11096 goto do_fault; 11097 case 1: /* 64k page. */ 11098 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 11099 ap = (desc >> (4 + ((address >> 13) & 6))) & 3; 11100 *page_size = 0x10000; 11101 break; 11102 case 2: /* 4k page. */ 11103 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 11104 ap = (desc >> (4 + ((address >> 9) & 6))) & 3; 11105 *page_size = 0x1000; 11106 break; 11107 case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */ 11108 if (type == 1) { 11109 /* ARMv6/XScale extended small page format */ 11110 if (arm_feature(env, ARM_FEATURE_XSCALE) 11111 || arm_feature(env, ARM_FEATURE_V6)) { 11112 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 11113 *page_size = 0x1000; 11114 } else { 11115 /* UNPREDICTABLE in ARMv5; we choose to take a 11116 * page translation fault. 11117 */ 11118 fi->type = ARMFault_Translation; 11119 goto do_fault; 11120 } 11121 } else { 11122 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff); 11123 *page_size = 0x400; 11124 } 11125 ap = (desc >> 4) & 3; 11126 break; 11127 default: 11128 /* Never happens, but compiler isn't smart enough to tell. */ 11129 abort(); 11130 } 11131 } 11132 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 11133 *prot |= *prot ? PAGE_EXEC : 0; 11134 if (!(*prot & (1 << access_type))) { 11135 /* Access permission fault. */ 11136 fi->type = ARMFault_Permission; 11137 goto do_fault; 11138 } 11139 *phys_ptr = phys_addr; 11140 return false; 11141 do_fault: 11142 fi->domain = domain; 11143 fi->level = level; 11144 return true; 11145 } 11146 11147 static bool get_phys_addr_v6(CPUARMState *env, uint32_t address, 11148 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11149 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 11150 target_ulong *page_size, ARMMMUFaultInfo *fi) 11151 { 11152 CPUState *cs = env_cpu(env); 11153 ARMCPU *cpu = env_archcpu(env); 11154 int level = 1; 11155 uint32_t table; 11156 uint32_t desc; 11157 uint32_t xn; 11158 uint32_t pxn = 0; 11159 int type; 11160 int ap; 11161 int domain = 0; 11162 int domain_prot; 11163 hwaddr phys_addr; 11164 uint32_t dacr; 11165 bool ns; 11166 11167 /* Pagetable walk. */ 11168 /* Lookup l1 descriptor. */ 11169 if (!get_level1_table_address(env, mmu_idx, &table, address)) { 11170 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 11171 fi->type = ARMFault_Translation; 11172 goto do_fault; 11173 } 11174 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 11175 mmu_idx, fi); 11176 if (fi->type != ARMFault_None) { 11177 goto do_fault; 11178 } 11179 type = (desc & 3); 11180 if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) { 11181 /* Section translation fault, or attempt to use the encoding 11182 * which is Reserved on implementations without PXN. 11183 */ 11184 fi->type = ARMFault_Translation; 11185 goto do_fault; 11186 } 11187 if ((type == 1) || !(desc & (1 << 18))) { 11188 /* Page or Section. 
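 * Supersections have no domain field (bits [8:5] hold extended
 * address bits instead), so domain is left as 0 for them.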
*/ 11189 domain = (desc >> 5) & 0x0f; 11190 } 11191 if (regime_el(env, mmu_idx) == 1) { 11192 dacr = env->cp15.dacr_ns; 11193 } else { 11194 dacr = env->cp15.dacr_s; 11195 } 11196 if (type == 1) { 11197 level = 2; 11198 } 11199 domain_prot = (dacr >> (domain * 2)) & 3; 11200 if (domain_prot == 0 || domain_prot == 2) { 11201 /* Section or Page domain fault */ 11202 fi->type = ARMFault_Domain; 11203 goto do_fault; 11204 } 11205 if (type != 1) { 11206 if (desc & (1 << 18)) { 11207 /* Supersection. */ 11208 phys_addr = (desc & 0xff000000) | (address & 0x00ffffff); 11209 phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32; 11210 phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36; 11211 *page_size = 0x1000000; 11212 } else { 11213 /* Section. */ 11214 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 11215 *page_size = 0x100000; 11216 } 11217 ap = ((desc >> 10) & 3) | ((desc >> 13) & 4); 11218 xn = desc & (1 << 4); 11219 pxn = desc & 1; 11220 ns = extract32(desc, 19, 1); 11221 } else { 11222 if (cpu_isar_feature(aa32_pxn, cpu)) { 11223 pxn = (desc >> 2) & 1; 11224 } 11225 ns = extract32(desc, 3, 1); 11226 /* Lookup l2 entry. */ 11227 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 11228 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 11229 mmu_idx, fi); 11230 if (fi->type != ARMFault_None) { 11231 goto do_fault; 11232 } 11233 ap = ((desc >> 4) & 3) | ((desc >> 7) & 4); 11234 switch (desc & 3) { 11235 case 0: /* Page translation fault. */ 11236 fi->type = ARMFault_Translation; 11237 goto do_fault; 11238 case 1: /* 64k page. */ 11239 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 11240 xn = desc & (1 << 15); 11241 *page_size = 0x10000; 11242 break; 11243 case 2: case 3: /* 4k page. */ 11244 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 11245 xn = desc & 1; 11246 *page_size = 0x1000; 11247 break; 11248 default: 11249 /* Never happens, but compiler isn't smart enough to tell. */ 11250 abort(); 11251 } 11252 } 11253 if (domain_prot == 3) { 11254 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 11255 } else { 11256 if (pxn && !regime_is_user(env, mmu_idx)) { 11257 xn = 1; 11258 } 11259 if (xn && access_type == MMU_INST_FETCH) { 11260 fi->type = ARMFault_Permission; 11261 goto do_fault; 11262 } 11263 11264 if (arm_feature(env, ARM_FEATURE_V6K) && 11265 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) { 11266 /* The simplified model uses AP[0] as an access control bit. */ 11267 if ((ap & 1) == 0) { 11268 /* Access flag fault. */ 11269 fi->type = ARMFault_AccessFlag; 11270 goto do_fault; 11271 } 11272 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1); 11273 } else { 11274 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 11275 } 11276 if (*prot && !xn) { 11277 *prot |= PAGE_EXEC; 11278 } 11279 if (!(*prot & (1 << access_type))) { 11280 /* Access permission fault. */ 11281 fi->type = ARMFault_Permission; 11282 goto do_fault; 11283 } 11284 } 11285 if (ns) { 11286 /* The NS bit will (as required by the architecture) have no effect if 11287 * the CPU doesn't support TZ or this is a non-secure translation 11288 * regime, because the attribute will already be non-secure. 
11289 */ 11290 attrs->secure = false; 11291 } 11292 *phys_ptr = phys_addr; 11293 return false; 11294 do_fault: 11295 fi->domain = domain; 11296 fi->level = level; 11297 return true; 11298 } 11299 11300 /* 11301 * check_s2_mmu_setup 11302 * @cpu: ARMCPU 11303 * @is_aa64: True if the translation regime is in AArch64 state 11304 * @startlevel: Suggested starting level 11305 * @inputsize: Bitsize of IPAs 11306 * @stride: Page-table stride (See the ARM ARM) 11307 * 11308 * Returns true if the suggested S2 translation parameters are OK and 11309 * false otherwise. 11310 */ 11311 static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level, 11312 int inputsize, int stride) 11313 { 11314 const int grainsize = stride + 3; 11315 int startsizecheck; 11316 11317 /* Negative levels are never allowed. */ 11318 if (level < 0) { 11319 return false; 11320 } 11321 11322 startsizecheck = inputsize - ((3 - level) * stride + grainsize); 11323 if (startsizecheck < 1 || startsizecheck > stride + 4) { 11324 return false; 11325 } 11326 11327 if (is_aa64) { 11328 CPUARMState *env = &cpu->env; 11329 unsigned int pamax = arm_pamax(cpu); 11330 11331 switch (stride) { 11332 case 13: /* 64KB Pages. */ 11333 if (level == 0 || (level == 1 && pamax <= 42)) { 11334 return false; 11335 } 11336 break; 11337 case 11: /* 16KB Pages. */ 11338 if (level == 0 || (level == 1 && pamax <= 40)) { 11339 return false; 11340 } 11341 break; 11342 case 9: /* 4KB Pages. */ 11343 if (level == 0 && pamax <= 42) { 11344 return false; 11345 } 11346 break; 11347 default: 11348 g_assert_not_reached(); 11349 } 11350 11351 /* Inputsize checks. */ 11352 if (inputsize > pamax && 11353 (arm_el_is_aa64(env, 1) || inputsize > 40)) { 11354 /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */ 11355 return false; 11356 } 11357 } else { 11358 /* AArch32 only supports 4KB pages. Assert on that. */ 11359 assert(stride == 9); 11360 11361 if (level == 0) { 11362 return false; 11363 } 11364 } 11365 return true; 11366 } 11367 11368 /* Translate from the 4-bit stage 2 representation of 11369 * memory attributes (without cache-allocation hints) to 11370 * the 8-bit representation of the stage 1 MAIR registers 11371 * (which includes allocation hints). 11372 * 11373 * ref: shared/translation/attrs/S2AttrDecode() 11374 * .../S2ConvertAttrsHints() 11375 */ 11376 static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs) 11377 { 11378 uint8_t hiattr = extract32(s2attrs, 2, 2); 11379 uint8_t loattr = extract32(s2attrs, 0, 2); 11380 uint8_t hihint = 0, lohint = 0; 11381 11382 if (hiattr != 0) { /* normal memory */ 11383 if (arm_hcr_el2_eff(env) & HCR_CD) { /* cache disabled */ 11384 hiattr = loattr = 1; /* non-cacheable */ 11385 } else { 11386 if (hiattr != 1) { /* Write-through or write-back */ 11387 hihint = 3; /* RW allocate */ 11388 } 11389 if (loattr != 1) { /* Write-through or write-back */ 11390 lohint = 3; /* RW allocate */ 11391 } 11392 } 11393 } 11394 11395 return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint; 11396 } 11397 #endif /* !CONFIG_USER_ONLY */ 11398 11399 static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx) 11400 { 11401 if (regime_has_2_ranges(mmu_idx)) { 11402 return extract64(tcr, 37, 2); 11403 } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) { 11404 return 0; /* VTCR_EL2 */ 11405 } else { 11406 /* Replicate the single TBI bit so we always have 2 bits. 
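 * Multiplying the 0-or-1 bit by 3 gives 0b00 or 0b11, i.e. the same
 * answer for both halves of the address space.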
*/ 11407 return extract32(tcr, 20, 1) * 3; 11408 } 11409 } 11410 11411 static int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx) 11412 { 11413 if (regime_has_2_ranges(mmu_idx)) { 11414 return extract64(tcr, 51, 2); 11415 } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) { 11416 return 0; /* VTCR_EL2 */ 11417 } else { 11418 /* Replicate the single TBID bit so we always have 2 bits. */ 11419 return extract32(tcr, 29, 1) * 3; 11420 } 11421 } 11422 11423 static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx) 11424 { 11425 if (regime_has_2_ranges(mmu_idx)) { 11426 return extract64(tcr, 57, 2); 11427 } else { 11428 /* Replicate the single TCMA bit so we always have 2 bits. */ 11429 return extract32(tcr, 30, 1) * 3; 11430 } 11431 } 11432 11433 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, 11434 ARMMMUIdx mmu_idx, bool data) 11435 { 11436 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; 11437 bool epd, hpd, using16k, using64k; 11438 int select, tsz, tbi, max_tsz; 11439 11440 if (!regime_has_2_ranges(mmu_idx)) { 11441 select = 0; 11442 tsz = extract32(tcr, 0, 6); 11443 using64k = extract32(tcr, 14, 1); 11444 using16k = extract32(tcr, 15, 1); 11445 if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) { 11446 /* VTCR_EL2 */ 11447 hpd = false; 11448 } else { 11449 hpd = extract32(tcr, 24, 1); 11450 } 11451 epd = false; 11452 } else { 11453 /* 11454 * Bit 55 is always between the two regions, and is canonical for 11455 * determining if address tagging is enabled. 11456 */ 11457 select = extract64(va, 55, 1); 11458 if (!select) { 11459 tsz = extract32(tcr, 0, 6); 11460 epd = extract32(tcr, 7, 1); 11461 using64k = extract32(tcr, 14, 1); 11462 using16k = extract32(tcr, 15, 1); 11463 hpd = extract64(tcr, 41, 1); 11464 } else { 11465 int tg = extract32(tcr, 30, 2); 11466 using16k = tg == 1; 11467 using64k = tg == 3; 11468 tsz = extract32(tcr, 16, 6); 11469 epd = extract32(tcr, 23, 1); 11470 hpd = extract64(tcr, 42, 1); 11471 } 11472 } 11473 11474 if (cpu_isar_feature(aa64_st, env_archcpu(env))) { 11475 max_tsz = 48 - using64k; 11476 } else { 11477 max_tsz = 39; 11478 } 11479 11480 tsz = MIN(tsz, max_tsz); 11481 tsz = MAX(tsz, 16); /* TODO: ARMv8.2-LVA */ 11482 11483 /* Present TBI as a composite with TBID. */ 11484 tbi = aa64_va_parameter_tbi(tcr, mmu_idx); 11485 if (!data) { 11486 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx); 11487 } 11488 tbi = (tbi >> select) & 1; 11489 11490 return (ARMVAParameters) { 11491 .tsz = tsz, 11492 .select = select, 11493 .tbi = tbi, 11494 .epd = epd, 11495 .hpd = hpd, 11496 .using16k = using16k, 11497 .using64k = using64k, 11498 }; 11499 } 11500 11501 #ifndef CONFIG_USER_ONLY 11502 static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va, 11503 ARMMMUIdx mmu_idx) 11504 { 11505 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; 11506 uint32_t el = regime_el(env, mmu_idx); 11507 int select, tsz; 11508 bool epd, hpd; 11509 11510 assert(mmu_idx != ARMMMUIdx_Stage2_S); 11511 11512 if (mmu_idx == ARMMMUIdx_Stage2) { 11513 /* VTCR */ 11514 bool sext = extract32(tcr, 4, 1); 11515 bool sign = extract32(tcr, 3, 1); 11516 11517 /* 11518 * If the sign-extend bit is not the same as t0sz[3], the result 11519 * is unpredictable. Flag this as a guest error. 
11520 */ 11521 if (sign != sext) { 11522 qemu_log_mask(LOG_GUEST_ERROR, 11523 "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n"); 11524 } 11525 tsz = sextract32(tcr, 0, 4) + 8; 11526 select = 0; 11527 hpd = false; 11528 epd = false; 11529 } else if (el == 2) { 11530 /* HTCR */ 11531 tsz = extract32(tcr, 0, 3); 11532 select = 0; 11533 hpd = extract64(tcr, 24, 1); 11534 epd = false; 11535 } else { 11536 int t0sz = extract32(tcr, 0, 3); 11537 int t1sz = extract32(tcr, 16, 3); 11538 11539 if (t1sz == 0) { 11540 select = va > (0xffffffffu >> t0sz); 11541 } else { 11542 /* Note that we will detect errors later. */ 11543 select = va >= ~(0xffffffffu >> t1sz); 11544 } 11545 if (!select) { 11546 tsz = t0sz; 11547 epd = extract32(tcr, 7, 1); 11548 hpd = extract64(tcr, 41, 1); 11549 } else { 11550 tsz = t1sz; 11551 epd = extract32(tcr, 23, 1); 11552 hpd = extract64(tcr, 42, 1); 11553 } 11554 /* For aarch32, hpd0 is not enabled without t2e as well. */ 11555 hpd &= extract32(tcr, 6, 1); 11556 } 11557 11558 return (ARMVAParameters) { 11559 .tsz = tsz, 11560 .select = select, 11561 .epd = epd, 11562 .hpd = hpd, 11563 }; 11564 } 11565 11566 /** 11567 * get_phys_addr_lpae: perform one stage of page table walk, LPAE format 11568 * 11569 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs, 11570 * prot and page_size may not be filled in, and the populated fsr value provides 11571 * information on why the translation aborted, in the format of a long-format 11572 * DFSR/IFSR fault register, with the following caveats: 11573 * * the WnR bit is never set (the caller must do this). 11574 * 11575 * @env: CPUARMState 11576 * @address: virtual address to get physical address for 11577 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH 11578 * @mmu_idx: MMU index indicating required translation regime 11579 * @s1_is_el0: if @mmu_idx is ARMMMUIdx_Stage2 (so this is a stage 2 page table 11580 * walk), must be true if this is stage 2 of a stage 1+2 walk for an 11581 * EL0 access). If @mmu_idx is anything else, @s1_is_el0 is ignored. 11582 * @phys_ptr: set to the physical address corresponding to the virtual address 11583 * @attrs: set to the memory transaction attributes to use 11584 * @prot: set to the permissions for the page containing phys_ptr 11585 * @page_size_ptr: set to the size of the page containing phys_ptr 11586 * @fi: set to fault info if the translation fails 11587 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes 11588 */ 11589 static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address, 11590 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11591 bool s1_is_el0, 11592 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, 11593 target_ulong *page_size_ptr, 11594 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) 11595 { 11596 ARMCPU *cpu = env_archcpu(env); 11597 CPUState *cs = CPU(cpu); 11598 /* Read an LPAE long-descriptor translation table. */ 11599 ARMFaultType fault_type = ARMFault_Translation; 11600 uint32_t level; 11601 ARMVAParameters param; 11602 uint64_t ttbr; 11603 hwaddr descaddr, indexmask, indexmask_grainsize; 11604 uint32_t tableattrs; 11605 target_ulong page_size; 11606 uint32_t attrs; 11607 int32_t stride; 11608 int addrsize, inputsize; 11609 TCR *tcr = regime_tcr(env, mmu_idx); 11610 int ap, ns, xn, pxn; 11611 uint32_t el = regime_el(env, mmu_idx); 11612 uint64_t descaddrmask; 11613 bool aarch64 = arm_el_is_aa64(env, el); 11614 bool guarded = false; 11615 11616 /* TODO: This code does not support shareability levels. 
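     * (Illustrative note for the setup just below, unrelated to the TODO:
     * when param.tbi is non-zero the top byte of the VA is ignored, so
     * addrsize becomes 64 - 8 == 56 for AArch64.)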
*/ 11617 if (aarch64) { 11618 param = aa64_va_parameters(env, address, mmu_idx, 11619 access_type != MMU_INST_FETCH); 11620 level = 0; 11621 addrsize = 64 - 8 * param.tbi; 11622 inputsize = 64 - param.tsz; 11623 } else { 11624 param = aa32_va_parameters(env, address, mmu_idx); 11625 level = 1; 11626 addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32); 11627 inputsize = addrsize - param.tsz; 11628 } 11629 11630 /* 11631 * We determined the region when collecting the parameters, but we 11632 * have not yet validated that the address is valid for the region. 11633 * Extract the top bits and verify that they all match select. 11634 * 11635 * For aa32, if inputsize == addrsize, then we have selected the 11636 * region by exclusion in aa32_va_parameters and there is no more 11637 * validation to do here. 11638 */ 11639 if (inputsize < addrsize) { 11640 target_ulong top_bits = sextract64(address, inputsize, 11641 addrsize - inputsize); 11642 if (-top_bits != param.select) { 11643 /* The gap between the two regions is a Translation fault */ 11644 fault_type = ARMFault_Translation; 11645 goto do_fault; 11646 } 11647 } 11648 11649 if (param.using64k) { 11650 stride = 13; 11651 } else if (param.using16k) { 11652 stride = 11; 11653 } else { 11654 stride = 9; 11655 } 11656 11657 /* Note that QEMU ignores shareability and cacheability attributes, 11658 * so we don't need to do anything with the SH, ORGN, IRGN fields 11659 * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the 11660 * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently 11661 * implement any ASID-like capability so we can ignore it (instead 11662 * we will always flush the TLB any time the ASID is changed). 11663 */ 11664 ttbr = regime_ttbr(env, mmu_idx, param.select); 11665 11666 /* Here we should have set up all the parameters for the translation: 11667 * inputsize, ttbr, epd, stride, tbi 11668 */ 11669 11670 if (param.epd) { 11671 /* Translation table walk disabled => Translation fault on TLB miss 11672 * Note: This is always 0 on 64-bit EL2 and EL3. 11673 */ 11674 goto do_fault; 11675 } 11676 11677 if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) { 11678 /* The starting level depends on the virtual address size (which can 11679 * be up to 48 bits) and the translation granule size. It indicates 11680 * the number of strides (stride bits at a time) needed to 11681 * consume the bits of the input address. In the pseudocode this is: 11682 * level = 4 - RoundUp((inputsize - grainsize) / stride) 11683 * where their 'inputsize' is our 'inputsize', 'grainsize' is 11684 * our 'stride + 3' and 'stride' is our 'stride'. 11685 * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying: 11686 * = 4 - (inputsize - stride - 3 + stride - 1) / stride 11687 * = 4 - (inputsize - 4) / stride; 11688 */ 11689 level = 4 - (inputsize - 4) / stride; 11690 } else { 11691 /* For stage 2 translations the starting level is specified by the 11692 * VTCR_EL2.SL0 field (whose interpretation depends on the page size) 11693 */ 11694 uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2); 11695 uint32_t startlevel; 11696 bool ok; 11697 11698 if (!aarch64 || stride == 9) { 11699 /* AArch32 or 4KB pages */ 11700 startlevel = 2 - sl0; 11701 11702 if (cpu_isar_feature(aa64_st, cpu)) { 11703 startlevel &= 3; 11704 } 11705 } else { 11706 /* 16KB or 64KB pages */ 11707 startlevel = 3 - sl0; 11708 } 11709 11710 /* Check that the starting level is valid. 
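         * For example (illustrative): with 4KB pages and SL0 == 1 the
         * suggested startlevel is 2 - 1 == 1, which check_s2_mmu_setup()
         * then validates against the IPA size and the CPU's PAMax.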
*/ 11711 ok = check_s2_mmu_setup(cpu, aarch64, startlevel, 11712 inputsize, stride); 11713 if (!ok) { 11714 fault_type = ARMFault_Translation; 11715 goto do_fault; 11716 } 11717 level = startlevel; 11718 } 11719 11720 indexmask_grainsize = (1ULL << (stride + 3)) - 1; 11721 indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1; 11722 11723 /* Now we can extract the actual base address from the TTBR */ 11724 descaddr = extract64(ttbr, 0, 48); 11725 /* 11726 * We rely on this masking to clear the RES0 bits at the bottom of the TTBR 11727 * and also to mask out CnP (bit 0) which could validly be non-zero. 11728 */ 11729 descaddr &= ~indexmask; 11730 11731 /* The address field in the descriptor goes up to bit 39 for ARMv7 11732 * but up to bit 47 for ARMv8, but we use the descaddrmask 11733 * up to bit 39 for AArch32, because we don't need other bits in that case 11734 * to construct next descriptor address (anyway they should be all zeroes). 11735 */ 11736 descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) & 11737 ~indexmask_grainsize; 11738 11739 /* Secure accesses start with the page table in secure memory and 11740 * can be downgraded to non-secure at any step. Non-secure accesses 11741 * remain non-secure. We implement this by just ORing in the NSTable/NS 11742 * bits at each step. 11743 */ 11744 tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4); 11745 for (;;) { 11746 uint64_t descriptor; 11747 bool nstable; 11748 11749 descaddr |= (address >> (stride * (4 - level))) & indexmask; 11750 descaddr &= ~7ULL; 11751 nstable = extract32(tableattrs, 4, 1); 11752 descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi); 11753 if (fi->type != ARMFault_None) { 11754 goto do_fault; 11755 } 11756 11757 if (!(descriptor & 1) || 11758 (!(descriptor & 2) && (level == 3))) { 11759 /* Invalid, or the Reserved level 3 encoding */ 11760 goto do_fault; 11761 } 11762 descaddr = descriptor & descaddrmask; 11763 11764 if ((descriptor & 2) && (level < 3)) { 11765 /* Table entry. The top five bits are attributes which may 11766 * propagate down through lower levels of the table (and 11767 * which are all arranged so that 0 means "no effect", so 11768 * we can gather them up by ORing in the bits at each level). 11769 */ 11770 tableattrs |= extract64(descriptor, 59, 5); 11771 level++; 11772 indexmask = indexmask_grainsize; 11773 continue; 11774 } 11775 /* Block entry at level 1 or 2, or page entry at level 3. 11776 * These are basically the same thing, although the number 11777 * of bits we pull in from the vaddr varies. 11778 */ 11779 page_size = (1ULL << ((stride * (4 - level)) + 3)); 11780 descaddr |= (address & (page_size - 1)); 11781 /* Extract attributes from the descriptor */ 11782 attrs = extract64(descriptor, 2, 10) 11783 | (extract64(descriptor, 52, 12) << 10); 11784 11785 if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) { 11786 /* Stage 2 table descriptors do not include any attribute fields */ 11787 break; 11788 } 11789 /* Merge in attributes from table descriptors */ 11790 attrs |= nstable << 3; /* NS */ 11791 guarded = extract64(descriptor, 50, 1); /* GP */ 11792 if (param.hpd) { 11793 /* HPD disables all the table attributes except NSTable. */ 11794 break; 11795 } 11796 attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */ 11797 /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1 11798 * means "force PL1 access only", which means forcing AP[1] to 0. 
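         * For example (illustrative): a leaf entry with AP[1] == 1 (EL0
         * accessible) underneath a table entry with APTable[0] == 1 ends up
         * with AP[1] cleared, i.e. privileged-only, which is what the
         * hierarchical access controls require.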
11799 */ 11800 attrs &= ~(extract32(tableattrs, 2, 1) << 4); /* !APT[0] => AP[1] */ 11801 attrs |= extract32(tableattrs, 3, 1) << 5; /* APT[1] => AP[2] */ 11802 break; 11803 } 11804 /* Here descaddr is the final physical address, and attributes 11805 * are all in attrs. 11806 */ 11807 fault_type = ARMFault_AccessFlag; 11808 if ((attrs & (1 << 8)) == 0) { 11809 /* Access flag */ 11810 goto do_fault; 11811 } 11812 11813 ap = extract32(attrs, 4, 2); 11814 11815 if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) { 11816 ns = mmu_idx == ARMMMUIdx_Stage2; 11817 xn = extract32(attrs, 11, 2); 11818 *prot = get_S2prot(env, ap, xn, s1_is_el0); 11819 } else { 11820 ns = extract32(attrs, 3, 1); 11821 xn = extract32(attrs, 12, 1); 11822 pxn = extract32(attrs, 11, 1); 11823 *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn); 11824 } 11825 11826 fault_type = ARMFault_Permission; 11827 if (!(*prot & (1 << access_type))) { 11828 goto do_fault; 11829 } 11830 11831 if (ns) { 11832 /* The NS bit will (as required by the architecture) have no effect if 11833 * the CPU doesn't support TZ or this is a non-secure translation 11834 * regime, because the attribute will already be non-secure. 11835 */ 11836 txattrs->secure = false; 11837 } 11838 /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */ 11839 if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) { 11840 arm_tlb_bti_gp(txattrs) = true; 11841 } 11842 11843 if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) { 11844 cacheattrs->attrs = convert_stage2_attrs(env, extract32(attrs, 0, 4)); 11845 } else { 11846 /* Index into MAIR registers for cache attributes */ 11847 uint8_t attrindx = extract32(attrs, 0, 3); 11848 uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; 11849 assert(attrindx <= 7); 11850 cacheattrs->attrs = extract64(mair, attrindx * 8, 8); 11851 } 11852 cacheattrs->shareability = extract32(attrs, 6, 2); 11853 11854 *phys_ptr = descaddr; 11855 *page_size_ptr = page_size; 11856 return false; 11857 11858 do_fault: 11859 fi->type = fault_type; 11860 fi->level = level; 11861 /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */ 11862 fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2 || 11863 mmu_idx == ARMMMUIdx_Stage2_S); 11864 fi->s1ns = mmu_idx == ARMMMUIdx_Stage2; 11865 return true; 11866 } 11867 11868 static inline void get_phys_addr_pmsav7_default(CPUARMState *env, 11869 ARMMMUIdx mmu_idx, 11870 int32_t address, int *prot) 11871 { 11872 if (!arm_feature(env, ARM_FEATURE_M)) { 11873 *prot = PAGE_READ | PAGE_WRITE; 11874 switch (address) { 11875 case 0xF0000000 ... 0xFFFFFFFF: 11876 if (regime_sctlr(env, mmu_idx) & SCTLR_V) { 11877 /* hivecs execing is ok */ 11878 *prot |= PAGE_EXEC; 11879 } 11880 break; 11881 case 0x00000000 ... 0x7FFFFFFF: 11882 *prot |= PAGE_EXEC; 11883 break; 11884 } 11885 } else { 11886 /* Default system address map for M profile cores. 11887 * The architecture specifies which regions are execute-never; 11888 * at the MPU level no other checks are defined. 11889 */ 11890 switch (address) { 11891 case 0x00000000 ... 0x1fffffff: /* ROM */ 11892 case 0x20000000 ... 0x3fffffff: /* SRAM */ 11893 case 0x60000000 ... 0x7fffffff: /* RAM */ 11894 case 0x80000000 ... 0x9fffffff: /* RAM */ 11895 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 11896 break; 11897 case 0x40000000 ... 0x5fffffff: /* Peripheral */ 11898 case 0xa0000000 ... 0xbfffffff: /* Device */ 11899 case 0xc0000000 ... 0xdfffffff: /* Device */ 11900 case 0xe0000000 ... 
0xffffffff: /* System */ 11901 *prot = PAGE_READ | PAGE_WRITE; 11902 break; 11903 default: 11904 g_assert_not_reached(); 11905 } 11906 } 11907 } 11908 11909 static bool pmsav7_use_background_region(ARMCPU *cpu, 11910 ARMMMUIdx mmu_idx, bool is_user) 11911 { 11912 /* Return true if we should use the default memory map as a 11913 * "background" region if there are no hits against any MPU regions. 11914 */ 11915 CPUARMState *env = &cpu->env; 11916 11917 if (is_user) { 11918 return false; 11919 } 11920 11921 if (arm_feature(env, ARM_FEATURE_M)) { 11922 return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] 11923 & R_V7M_MPU_CTRL_PRIVDEFENA_MASK; 11924 } else { 11925 return regime_sctlr(env, mmu_idx) & SCTLR_BR; 11926 } 11927 } 11928 11929 static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address) 11930 { 11931 /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */ 11932 return arm_feature(env, ARM_FEATURE_M) && 11933 extract32(address, 20, 12) == 0xe00; 11934 } 11935 11936 static inline bool m_is_system_region(CPUARMState *env, uint32_t address) 11937 { 11938 /* True if address is in the M profile system region 11939 * 0xe0000000 - 0xffffffff 11940 */ 11941 return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7; 11942 } 11943 11944 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, 11945 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11946 hwaddr *phys_ptr, int *prot, 11947 target_ulong *page_size, 11948 ARMMMUFaultInfo *fi) 11949 { 11950 ARMCPU *cpu = env_archcpu(env); 11951 int n; 11952 bool is_user = regime_is_user(env, mmu_idx); 11953 11954 *phys_ptr = address; 11955 *page_size = TARGET_PAGE_SIZE; 11956 *prot = 0; 11957 11958 if (regime_translation_disabled(env, mmu_idx) || 11959 m_is_ppb_region(env, address)) { 11960 /* MPU disabled or M profile PPB access: use default memory map. 11961 * The other case which uses the default memory map in the 11962 * v7M ARM ARM pseudocode is exception vector reads from the vector 11963 * table. In QEMU those accesses are done in arm_v7m_load_vector(), 11964 * which always does a direct read using address_space_ldl(), rather 11965 * than going via this function, so we don't need to check that here. 11966 */ 11967 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 11968 } else { /* MPU enabled */ 11969 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { 11970 /* region search */ 11971 uint32_t base = env->pmsav7.drbar[n]; 11972 uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5); 11973 uint32_t rmask; 11974 bool srdis = false; 11975 11976 if (!(env->pmsav7.drsr[n] & 0x1)) { 11977 continue; 11978 } 11979 11980 if (!rsize) { 11981 qemu_log_mask(LOG_GUEST_ERROR, 11982 "DRSR[%d]: Rsize field cannot be 0\n", n); 11983 continue; 11984 } 11985 rsize++; 11986 rmask = (1ull << rsize) - 1; 11987 11988 if (base & rmask) { 11989 qemu_log_mask(LOG_GUEST_ERROR, 11990 "DRBAR[%d]: 0x%" PRIx32 " misaligned " 11991 "to DRSR region size, mask = 0x%" PRIx32 "\n", 11992 n, base, rmask); 11993 continue; 11994 } 11995 11996 if (address < base || address > base + rmask) { 11997 /* 11998 * Address not in this region. We must check whether the 11999 * region covers addresses in the same page as our address. 12000 * In that case we must not report a size that covers the 12001 * whole page for a subsequent hit against a different MPU 12002 * region or the background region, because it would result in 12003 * incorrect TLB hits for subsequent accesses to addresses that 12004 * are in this MPU region. 
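                 * Reporting *page_size = 1 below means the TLB entry created
                 * for this access covers just the access itself, so other
                 * addresses in the same page still go through the full MPU
                 * lookup.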
12005 */ 12006 if (ranges_overlap(base, rmask, 12007 address & TARGET_PAGE_MASK, 12008 TARGET_PAGE_SIZE)) { 12009 *page_size = 1; 12010 } 12011 continue; 12012 } 12013 12014 /* Region matched */ 12015 12016 if (rsize >= 8) { /* no subregions for regions < 256 bytes */ 12017 int i, snd; 12018 uint32_t srdis_mask; 12019 12020 rsize -= 3; /* sub region size (power of 2) */ 12021 snd = ((address - base) >> rsize) & 0x7; 12022 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1); 12023 12024 srdis_mask = srdis ? 0x3 : 0x0; 12025 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) { 12026 /* This will check in groups of 2, 4 and then 8, whether 12027 * the subregion bits are consistent. rsize is incremented 12028 * back up to give the region size, considering consistent 12029 * adjacent subregions as one region. Stop testing if rsize 12030 * is already big enough for an entire QEMU page. 12031 */ 12032 int snd_rounded = snd & ~(i - 1); 12033 uint32_t srdis_multi = extract32(env->pmsav7.drsr[n], 12034 snd_rounded + 8, i); 12035 if (srdis_mask ^ srdis_multi) { 12036 break; 12037 } 12038 srdis_mask = (srdis_mask << i) | srdis_mask; 12039 rsize++; 12040 } 12041 } 12042 if (srdis) { 12043 continue; 12044 } 12045 if (rsize < TARGET_PAGE_BITS) { 12046 *page_size = 1 << rsize; 12047 } 12048 break; 12049 } 12050 12051 if (n == -1) { /* no hits */ 12052 if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) { 12053 /* background fault */ 12054 fi->type = ARMFault_Background; 12055 return true; 12056 } 12057 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 12058 } else { /* a MPU hit! */ 12059 uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3); 12060 uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1); 12061 12062 if (m_is_system_region(env, address)) { 12063 /* System space is always execute never */ 12064 xn = 1; 12065 } 12066 12067 if (is_user) { /* User mode AP bit decoding */ 12068 switch (ap) { 12069 case 0: 12070 case 1: 12071 case 5: 12072 break; /* no access */ 12073 case 3: 12074 *prot |= PAGE_WRITE; 12075 /* fall through */ 12076 case 2: 12077 case 6: 12078 *prot |= PAGE_READ | PAGE_EXEC; 12079 break; 12080 case 7: 12081 /* for v7M, same as 6; for R profile a reserved value */ 12082 if (arm_feature(env, ARM_FEATURE_M)) { 12083 *prot |= PAGE_READ | PAGE_EXEC; 12084 break; 12085 } 12086 /* fall through */ 12087 default: 12088 qemu_log_mask(LOG_GUEST_ERROR, 12089 "DRACR[%d]: Bad value for AP bits: 0x%" 12090 PRIx32 "\n", n, ap); 12091 } 12092 } else { /* Priv. 
mode AP bits decoding */ 12093 switch (ap) { 12094 case 0: 12095 break; /* no access */ 12096 case 1: 12097 case 2: 12098 case 3: 12099 *prot |= PAGE_WRITE; 12100 /* fall through */ 12101 case 5: 12102 case 6: 12103 *prot |= PAGE_READ | PAGE_EXEC; 12104 break; 12105 case 7: 12106 /* for v7M, same as 6; for R profile a reserved value */ 12107 if (arm_feature(env, ARM_FEATURE_M)) { 12108 *prot |= PAGE_READ | PAGE_EXEC; 12109 break; 12110 } 12111 /* fall through */ 12112 default: 12113 qemu_log_mask(LOG_GUEST_ERROR, 12114 "DRACR[%d]: Bad value for AP bits: 0x%" 12115 PRIx32 "\n", n, ap); 12116 } 12117 } 12118 12119 /* execute never */ 12120 if (xn) { 12121 *prot &= ~PAGE_EXEC; 12122 } 12123 } 12124 } 12125 12126 fi->type = ARMFault_Permission; 12127 fi->level = 1; 12128 return !(*prot & (1 << access_type)); 12129 } 12130 12131 static bool v8m_is_sau_exempt(CPUARMState *env, 12132 uint32_t address, MMUAccessType access_type) 12133 { 12134 /* The architecture specifies that certain address ranges are 12135 * exempt from v8M SAU/IDAU checks. 12136 */ 12137 return 12138 (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) || 12139 (address >= 0xe0000000 && address <= 0xe0002fff) || 12140 (address >= 0xe000e000 && address <= 0xe000efff) || 12141 (address >= 0xe002e000 && address <= 0xe002efff) || 12142 (address >= 0xe0040000 && address <= 0xe0041fff) || 12143 (address >= 0xe00ff000 && address <= 0xe00fffff); 12144 } 12145 12146 void v8m_security_lookup(CPUARMState *env, uint32_t address, 12147 MMUAccessType access_type, ARMMMUIdx mmu_idx, 12148 V8M_SAttributes *sattrs) 12149 { 12150 /* Look up the security attributes for this address. Compare the 12151 * pseudocode SecurityCheck() function. 12152 * We assume the caller has zero-initialized *sattrs. 12153 */ 12154 ARMCPU *cpu = env_archcpu(env); 12155 int r; 12156 bool idau_exempt = false, idau_ns = true, idau_nsc = true; 12157 int idau_region = IREGION_NOTVALID; 12158 uint32_t addr_page_base = address & TARGET_PAGE_MASK; 12159 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 12160 12161 if (cpu->idau) { 12162 IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau); 12163 IDAUInterface *ii = IDAU_INTERFACE(cpu->idau); 12164 12165 iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns, 12166 &idau_nsc); 12167 } 12168 12169 if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) { 12170 /* 0xf0000000..0xffffffff is always S for insn fetches */ 12171 return; 12172 } 12173 12174 if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) { 12175 sattrs->ns = !regime_is_secure(env, mmu_idx); 12176 return; 12177 } 12178 12179 if (idau_region != IREGION_NOTVALID) { 12180 sattrs->irvalid = true; 12181 sattrs->iregion = idau_region; 12182 } 12183 12184 switch (env->sau.ctrl & 3) { 12185 case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */ 12186 break; 12187 case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */ 12188 sattrs->ns = true; 12189 break; 12190 default: /* SAU.ENABLE == 1 */ 12191 for (r = 0; r < cpu->sau_sregion; r++) { 12192 if (env->sau.rlar[r] & 1) { 12193 uint32_t base = env->sau.rbar[r] & ~0x1f; 12194 uint32_t limit = env->sau.rlar[r] | 0x1f; 12195 12196 if (base <= address && limit >= address) { 12197 if (base > addr_page_base || limit < addr_page_limit) { 12198 sattrs->subpage = true; 12199 } 12200 if (sattrs->srvalid) { 12201 /* If we hit in more than one region then we must report 12202 * as Secure, not NS-Callable, with no valid region 12203 * number info. 
12204 */ 12205 sattrs->ns = false; 12206 sattrs->nsc = false; 12207 sattrs->sregion = 0; 12208 sattrs->srvalid = false; 12209 break; 12210 } else { 12211 if (env->sau.rlar[r] & 2) { 12212 sattrs->nsc = true; 12213 } else { 12214 sattrs->ns = true; 12215 } 12216 sattrs->srvalid = true; 12217 sattrs->sregion = r; 12218 } 12219 } else { 12220 /* 12221 * Address not in this region. We must check whether the 12222 * region covers addresses in the same page as our address. 12223 * In that case we must not report a size that covers the 12224 * whole page for a subsequent hit against a different MPU 12225 * region or the background region, because it would result 12226 * in incorrect TLB hits for subsequent accesses to 12227 * addresses that are in this MPU region. 12228 */ 12229 if (limit >= base && 12230 ranges_overlap(base, limit - base + 1, 12231 addr_page_base, 12232 TARGET_PAGE_SIZE)) { 12233 sattrs->subpage = true; 12234 } 12235 } 12236 } 12237 } 12238 break; 12239 } 12240 12241 /* 12242 * The IDAU will override the SAU lookup results if it specifies 12243 * higher security than the SAU does. 12244 */ 12245 if (!idau_ns) { 12246 if (sattrs->ns || (!idau_nsc && sattrs->nsc)) { 12247 sattrs->ns = false; 12248 sattrs->nsc = idau_nsc; 12249 } 12250 } 12251 } 12252 12253 bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, 12254 MMUAccessType access_type, ARMMMUIdx mmu_idx, 12255 hwaddr *phys_ptr, MemTxAttrs *txattrs, 12256 int *prot, bool *is_subpage, 12257 ARMMMUFaultInfo *fi, uint32_t *mregion) 12258 { 12259 /* Perform a PMSAv8 MPU lookup (without also doing the SAU check 12260 * that a full phys-to-virt translation does). 12261 * mregion is (if not NULL) set to the region number which matched, 12262 * or -1 if no region number is returned (MPU off, address did not 12263 * hit a region, address hit in multiple regions). 12264 * We set is_subpage to true if the region hit doesn't cover the 12265 * entire TARGET_PAGE the address is within. 12266 */ 12267 ARMCPU *cpu = env_archcpu(env); 12268 bool is_user = regime_is_user(env, mmu_idx); 12269 uint32_t secure = regime_is_secure(env, mmu_idx); 12270 int n; 12271 int matchregion = -1; 12272 bool hit = false; 12273 uint32_t addr_page_base = address & TARGET_PAGE_MASK; 12274 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 12275 12276 *is_subpage = false; 12277 *phys_ptr = address; 12278 *prot = 0; 12279 if (mregion) { 12280 *mregion = -1; 12281 } 12282 12283 /* Unlike the ARM ARM pseudocode, we don't need to check whether this 12284 * was an exception vector read from the vector table (which is always 12285 * done using the default system address map), because those accesses 12286 * are done in arm_v7m_load_vector(), which always does a direct 12287 * read using address_space_ldl(), rather than going via this function. 12288 */ 12289 if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */ 12290 hit = true; 12291 } else if (m_is_ppb_region(env, address)) { 12292 hit = true; 12293 } else { 12294 if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) { 12295 hit = true; 12296 } 12297 12298 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { 12299 /* region search */ 12300 /* Note that the base address is bits [31:5] from the register 12301 * with bits [4:0] all zeroes, but the limit address is bits 12302 * [31:5] from the register with bits [4:0] all ones. 
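             * For example (illustrative): RBAR == 0x20000040 gives base
             * 0x20000040, while RLAR == 0x200000c1 (enable bit set) gives
             * limit 0x200000df, i.e. a 160-byte region covering
             * 0x20000040..0x200000df inclusive.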
12303 */ 12304 uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f; 12305 uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f; 12306 12307 if (!(env->pmsav8.rlar[secure][n] & 0x1)) { 12308 /* Region disabled */ 12309 continue; 12310 } 12311 12312 if (address < base || address > limit) { 12313 /* 12314 * Address not in this region. We must check whether the 12315 * region covers addresses in the same page as our address. 12316 * In that case we must not report a size that covers the 12317 * whole page for a subsequent hit against a different MPU 12318 * region or the background region, because it would result in 12319 * incorrect TLB hits for subsequent accesses to addresses that 12320 * are in this MPU region. 12321 */ 12322 if (limit >= base && 12323 ranges_overlap(base, limit - base + 1, 12324 addr_page_base, 12325 TARGET_PAGE_SIZE)) { 12326 *is_subpage = true; 12327 } 12328 continue; 12329 } 12330 12331 if (base > addr_page_base || limit < addr_page_limit) { 12332 *is_subpage = true; 12333 } 12334 12335 if (matchregion != -1) { 12336 /* Multiple regions match -- always a failure (unlike 12337 * PMSAv7 where highest-numbered-region wins) 12338 */ 12339 fi->type = ARMFault_Permission; 12340 fi->level = 1; 12341 return true; 12342 } 12343 12344 matchregion = n; 12345 hit = true; 12346 } 12347 } 12348 12349 if (!hit) { 12350 /* background fault */ 12351 fi->type = ARMFault_Background; 12352 return true; 12353 } 12354 12355 if (matchregion == -1) { 12356 /* hit using the background region */ 12357 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 12358 } else { 12359 uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2); 12360 uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1); 12361 bool pxn = false; 12362 12363 if (arm_feature(env, ARM_FEATURE_V8_1M)) { 12364 pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1); 12365 } 12366 12367 if (m_is_system_region(env, address)) { 12368 /* System space is always execute never */ 12369 xn = 1; 12370 } 12371 12372 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap); 12373 if (*prot && !xn && !(pxn && !is_user)) { 12374 *prot |= PAGE_EXEC; 12375 } 12376 /* We don't need to look the attribute up in the MAIR0/MAIR1 12377 * registers because that only tells us about cacheability. 12378 */ 12379 if (mregion) { 12380 *mregion = matchregion; 12381 } 12382 } 12383 12384 fi->type = ARMFault_Permission; 12385 fi->level = 1; 12386 return !(*prot & (1 << access_type)); 12387 } 12388 12389 12390 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, 12391 MMUAccessType access_type, ARMMMUIdx mmu_idx, 12392 hwaddr *phys_ptr, MemTxAttrs *txattrs, 12393 int *prot, target_ulong *page_size, 12394 ARMMMUFaultInfo *fi) 12395 { 12396 uint32_t secure = regime_is_secure(env, mmu_idx); 12397 V8M_SAttributes sattrs = {}; 12398 bool ret; 12399 bool mpu_is_subpage; 12400 12401 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 12402 v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs); 12403 if (access_type == MMU_INST_FETCH) { 12404 /* Instruction fetches always use the MMU bank and the 12405 * transaction attribute determined by the fetch address, 12406 * regardless of CPU state. This is painful for QEMU 12407 * to handle, because it would mean we need to encode 12408 * into the mmu_idx not just the (user, negpri) information 12409 * for the current security state but also that for the 12410 * other security state, which would balloon the number 12411 * of mmu_idx values needed alarmingly. 
12412 * Fortunately we can avoid this because it's not actually 12413 * possible to arbitrarily execute code from memory with 12414 * the wrong security attribute: it will always generate 12415 * an exception of some kind or another, apart from the 12416 * special case of an NS CPU executing an SG instruction 12417 * in S&NSC memory. So we always just fail the translation 12418 * here and sort things out in the exception handler 12419 * (including possibly emulating an SG instruction). 12420 */ 12421 if (sattrs.ns != !secure) { 12422 if (sattrs.nsc) { 12423 fi->type = ARMFault_QEMU_NSCExec; 12424 } else { 12425 fi->type = ARMFault_QEMU_SFault; 12426 } 12427 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; 12428 *phys_ptr = address; 12429 *prot = 0; 12430 return true; 12431 } 12432 } else { 12433 /* For data accesses we always use the MMU bank indicated 12434 * by the current CPU state, but the security attributes 12435 * might downgrade a secure access to nonsecure. 12436 */ 12437 if (sattrs.ns) { 12438 txattrs->secure = false; 12439 } else if (!secure) { 12440 /* NS access to S memory must fault. 12441 * Architecturally we should first check whether the 12442 * MPU information for this address indicates that we 12443 * are doing an unaligned access to Device memory, which 12444 * should generate a UsageFault instead. QEMU does not 12445 * currently check for that kind of unaligned access though. 12446 * If we added it we would need to do so as a special case 12447 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt(). 12448 */ 12449 fi->type = ARMFault_QEMU_SFault; 12450 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; 12451 *phys_ptr = address; 12452 *prot = 0; 12453 return true; 12454 } 12455 } 12456 } 12457 12458 ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr, 12459 txattrs, prot, &mpu_is_subpage, fi, NULL); 12460 *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE; 12461 return ret; 12462 } 12463 12464 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address, 12465 MMUAccessType access_type, ARMMMUIdx mmu_idx, 12466 hwaddr *phys_ptr, int *prot, 12467 ARMMMUFaultInfo *fi) 12468 { 12469 int n; 12470 uint32_t mask; 12471 uint32_t base; 12472 bool is_user = regime_is_user(env, mmu_idx); 12473 12474 if (regime_translation_disabled(env, mmu_idx)) { 12475 /* MPU disabled. */ 12476 *phys_ptr = address; 12477 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 12478 return false; 12479 } 12480 12481 *phys_ptr = address; 12482 for (n = 7; n >= 0; n--) { 12483 base = env->cp15.c6_region[n]; 12484 if ((base & 1) == 0) { 12485 continue; 12486 } 12487 mask = 1 << ((base >> 1) & 0x1f); 12488 /* Keep this shift separate from the above to avoid an 12489 (undefined) << 32. 
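           For example (illustrative): a maximal size field of 0x1f gives
           mask == 0x80000000, and (mask << 1) - 1 then wraps (well defined
           for the unsigned type) to 0xffffffff, i.e. a 4GB region, whereas
           a single shift by 32 would be undefined behaviour.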
*/ 12490 mask = (mask << 1) - 1; 12491 if (((base ^ address) & ~mask) == 0) { 12492 break; 12493 } 12494 } 12495 if (n < 0) { 12496 fi->type = ARMFault_Background; 12497 return true; 12498 } 12499 12500 if (access_type == MMU_INST_FETCH) { 12501 mask = env->cp15.pmsav5_insn_ap; 12502 } else { 12503 mask = env->cp15.pmsav5_data_ap; 12504 } 12505 mask = (mask >> (n * 4)) & 0xf; 12506 switch (mask) { 12507 case 0: 12508 fi->type = ARMFault_Permission; 12509 fi->level = 1; 12510 return true; 12511 case 1: 12512 if (is_user) { 12513 fi->type = ARMFault_Permission; 12514 fi->level = 1; 12515 return true; 12516 } 12517 *prot = PAGE_READ | PAGE_WRITE; 12518 break; 12519 case 2: 12520 *prot = PAGE_READ; 12521 if (!is_user) { 12522 *prot |= PAGE_WRITE; 12523 } 12524 break; 12525 case 3: 12526 *prot = PAGE_READ | PAGE_WRITE; 12527 break; 12528 case 5: 12529 if (is_user) { 12530 fi->type = ARMFault_Permission; 12531 fi->level = 1; 12532 return true; 12533 } 12534 *prot = PAGE_READ; 12535 break; 12536 case 6: 12537 *prot = PAGE_READ; 12538 break; 12539 default: 12540 /* Bad permission. */ 12541 fi->type = ARMFault_Permission; 12542 fi->level = 1; 12543 return true; 12544 } 12545 *prot |= PAGE_EXEC; 12546 return false; 12547 } 12548 12549 /* Combine either inner or outer cacheability attributes for normal 12550 * memory, according to table D4-42 and pseudocode procedure 12551 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM). 12552 * 12553 * NB: only stage 1 includes allocation hints (RW bits), leading to 12554 * some asymmetry. 12555 */ 12556 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2) 12557 { 12558 if (s1 == 4 || s2 == 4) { 12559 /* non-cacheable has precedence */ 12560 return 4; 12561 } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) { 12562 /* stage 1 write-through takes precedence */ 12563 return s1; 12564 } else if (extract32(s2, 2, 2) == 2) { 12565 /* stage 2 write-through takes precedence, but the allocation hint 12566 * is still taken from stage 1 12567 */ 12568 return (2 << 2) | extract32(s1, 0, 2); 12569 } else { /* write-back */ 12570 return s1; 12571 } 12572 } 12573 12574 /* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4 12575 * and CombineS1S2Desc() 12576 * 12577 * @s1: Attributes from stage 1 walk 12578 * @s2: Attributes from stage 2 walk 12579 */ 12580 static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2) 12581 { 12582 uint8_t s1lo, s2lo, s1hi, s2hi; 12583 ARMCacheAttrs ret; 12584 bool tagged = false; 12585 12586 if (s1.attrs == 0xf0) { 12587 tagged = true; 12588 s1.attrs = 0xff; 12589 } 12590 12591 s1lo = extract32(s1.attrs, 0, 4); 12592 s2lo = extract32(s2.attrs, 0, 4); 12593 s1hi = extract32(s1.attrs, 4, 4); 12594 s2hi = extract32(s2.attrs, 4, 4); 12595 12596 /* Combine shareability attributes (table D4-43) */ 12597 if (s1.shareability == 2 || s2.shareability == 2) { 12598 /* if either are outer-shareable, the result is outer-shareable */ 12599 ret.shareability = 2; 12600 } else if (s1.shareability == 3 || s2.shareability == 3) { 12601 /* if either are inner-shareable, the result is inner-shareable */ 12602 ret.shareability = 3; 12603 } else { 12604 /* both non-shareable */ 12605 ret.shareability = 0; 12606 } 12607 12608 /* Combine memory type and cacheability attributes */ 12609 if (s1hi == 0 || s2hi == 0) { 12610 /* Device has precedence over normal */ 12611 if (s1lo == 0 || s2lo == 0) { 12612 /* nGnRnE has precedence over anything */ 12613 ret.attrs = 0; 12614 } else if (s1lo == 4 || s2lo == 4) { 
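        /*
         * Illustrative reminder of the Device low-nibble encodings being
         * compared here: 0x0 nGnRnE, 0x4 nGnRE, 0x8 nGRE, 0xc GRE.
         */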
12615 /* non-Reordering has precedence over Reordering */ 12616 ret.attrs = 4; /* nGnRE */ 12617 } else if (s1lo == 8 || s2lo == 8) { 12618 /* non-Gathering has precedence over Gathering */ 12619 ret.attrs = 8; /* nGRE */ 12620 } else { 12621 ret.attrs = 0xc; /* GRE */ 12622 } 12623 12624 /* Any location for which the resultant memory type is any 12625 * type of Device memory is always treated as Outer Shareable. 12626 */ 12627 ret.shareability = 2; 12628 } else { /* Normal memory */ 12629 /* Outer/inner cacheability combine independently */ 12630 ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4 12631 | combine_cacheattr_nibble(s1lo, s2lo); 12632 12633 if (ret.attrs == 0x44) { 12634 /* Any location for which the resultant memory type is Normal 12635 * Inner Non-cacheable, Outer Non-cacheable is always treated 12636 * as Outer Shareable. 12637 */ 12638 ret.shareability = 2; 12639 } 12640 } 12641 12642 /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */ 12643 if (tagged && ret.attrs == 0xff) { 12644 ret.attrs = 0xf0; 12645 } 12646 12647 return ret; 12648 } 12649 12650 12651 /* get_phys_addr - get the physical address for this virtual address 12652 * 12653 * Find the physical address corresponding to the given virtual address, 12654 * by doing a translation table walk on MMU based systems or using the 12655 * MPU state on MPU based systems. 12656 * 12657 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs, 12658 * prot and page_size may not be filled in, and the populated fsr value provides 12659 * information on why the translation aborted, in the format of a 12660 * DFSR/IFSR fault register, with the following caveats: 12661 * * we honour the short vs long DFSR format differences. 12662 * * the WnR bit is never set (the caller must do this). 12663 * * for PSMAv5 based systems we don't bother to return a full FSR format 12664 * value. 12665 * 12666 * @env: CPUARMState 12667 * @address: virtual address to get physical address for 12668 * @access_type: 0 for read, 1 for write, 2 for execute 12669 * @mmu_idx: MMU index indicating required translation regime 12670 * @phys_ptr: set to the physical address corresponding to the virtual address 12671 * @attrs: set to the memory transaction attributes to use 12672 * @prot: set to the permissions for the page containing phys_ptr 12673 * @page_size: set to the size of the page containing phys_ptr 12674 * @fi: set to fault info if the translation fails 12675 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes 12676 */ 12677 bool get_phys_addr(CPUARMState *env, target_ulong address, 12678 MMUAccessType access_type, ARMMMUIdx mmu_idx, 12679 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 12680 target_ulong *page_size, 12681 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) 12682 { 12683 ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx); 12684 12685 if (mmu_idx != s1_mmu_idx) { 12686 /* Call ourselves recursively to do the stage 1 and then stage 2 12687 * translations if mmu_idx is a two-stage regime. 12688 */ 12689 if (arm_feature(env, ARM_FEATURE_EL2)) { 12690 hwaddr ipa; 12691 int s2_prot; 12692 int ret; 12693 ARMCacheAttrs cacheattrs2 = {}; 12694 ARMMMUIdx s2_mmu_idx; 12695 bool is_el0; 12696 12697 ret = get_phys_addr(env, address, access_type, s1_mmu_idx, &ipa, 12698 attrs, prot, page_size, fi, cacheattrs); 12699 12700 /* If S1 fails or S2 is disabled, return early. 
*/ 12701 if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) { 12702 *phys_ptr = ipa; 12703 return ret; 12704 } 12705 12706 s2_mmu_idx = attrs->secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2; 12707 is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0; 12708 12709 /* S1 is done. Now do S2 translation. */ 12710 ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx, is_el0, 12711 phys_ptr, attrs, &s2_prot, 12712 page_size, fi, &cacheattrs2); 12713 fi->s2addr = ipa; 12714 /* Combine the S1 and S2 perms. */ 12715 *prot &= s2_prot; 12716 12717 /* If S2 fails, return early. */ 12718 if (ret) { 12719 return ret; 12720 } 12721 12722 /* Combine the S1 and S2 cache attributes. */ 12723 if (arm_hcr_el2_eff(env) & HCR_DC) { 12724 /* 12725 * HCR.DC forces the first stage attributes to 12726 * Normal Non-Shareable, 12727 * Inner Write-Back Read-Allocate Write-Allocate, 12728 * Outer Write-Back Read-Allocate Write-Allocate. 12729 * Do not overwrite Tagged within attrs. 12730 */ 12731 if (cacheattrs->attrs != 0xf0) { 12732 cacheattrs->attrs = 0xff; 12733 } 12734 cacheattrs->shareability = 0; 12735 } 12736 *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2); 12737 12738 /* Check if IPA translates to secure or non-secure PA space. */ 12739 if (arm_is_secure_below_el3(env)) { 12740 if (attrs->secure) { 12741 attrs->secure = 12742 !(env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW)); 12743 } else { 12744 attrs->secure = 12745 !((env->cp15.vtcr_el2.raw_tcr & (VTCR_NSA | VTCR_NSW)) 12746 || (env->cp15.vstcr_el2.raw_tcr & VSTCR_SA)); 12747 } 12748 } 12749 return 0; 12750 } else { 12751 /* 12752 * For non-EL2 CPUs a stage1+stage2 translation is just stage 1. 12753 */ 12754 mmu_idx = stage_1_mmu_idx(mmu_idx); 12755 } 12756 } 12757 12758 /* The page table entries may downgrade secure to non-secure, but 12759 * cannot upgrade an non-secure translation regime's attributes 12760 * to secure. 12761 */ 12762 attrs->secure = regime_is_secure(env, mmu_idx); 12763 attrs->user = regime_is_user(env, mmu_idx); 12764 12765 /* Fast Context Switch Extension. This doesn't exist at all in v8. 12766 * In v7 and earlier it affects all stage 1 translations. 12767 */ 12768 if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2 12769 && !arm_feature(env, ARM_FEATURE_V8)) { 12770 if (regime_el(env, mmu_idx) == 3) { 12771 address += env->cp15.fcseidr_s; 12772 } else { 12773 address += env->cp15.fcseidr_ns; 12774 } 12775 } 12776 12777 if (arm_feature(env, ARM_FEATURE_PMSA)) { 12778 bool ret; 12779 *page_size = TARGET_PAGE_SIZE; 12780 12781 if (arm_feature(env, ARM_FEATURE_V8)) { 12782 /* PMSAv8 */ 12783 ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx, 12784 phys_ptr, attrs, prot, page_size, fi); 12785 } else if (arm_feature(env, ARM_FEATURE_V7)) { 12786 /* PMSAv7 */ 12787 ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx, 12788 phys_ptr, prot, page_size, fi); 12789 } else { 12790 /* Pre-v7 MPU */ 12791 ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx, 12792 phys_ptr, prot, fi); 12793 } 12794 qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32 12795 " mmu_idx %u -> %s (prot %c%c%c)\n", 12796 access_type == MMU_DATA_LOAD ? "reading" : 12797 (access_type == MMU_DATA_STORE ? "writing" : "execute"), 12798 (uint32_t)address, mmu_idx, 12799 ret ? "Miss" : "Hit", 12800 *prot & PAGE_READ ? 'r' : '-', 12801 *prot & PAGE_WRITE ? 'w' : '-', 12802 *prot & PAGE_EXEC ? 
'x' : '-'); 12803 12804 return ret; 12805 } 12806 12807 /* Definitely a real MMU, not an MPU */ 12808 12809 if (regime_translation_disabled(env, mmu_idx)) { 12810 uint64_t hcr; 12811 uint8_t memattr; 12812 12813 /* 12814 * MMU disabled. S1 addresses within aa64 translation regimes are 12815 * still checked for bounds -- see AArch64.TranslateAddressS1Off. 12816 */ 12817 if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) { 12818 int r_el = regime_el(env, mmu_idx); 12819 if (arm_el_is_aa64(env, r_el)) { 12820 int pamax = arm_pamax(env_archcpu(env)); 12821 uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr; 12822 int addrtop, tbi; 12823 12824 tbi = aa64_va_parameter_tbi(tcr, mmu_idx); 12825 if (access_type == MMU_INST_FETCH) { 12826 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx); 12827 } 12828 tbi = (tbi >> extract64(address, 55, 1)) & 1; 12829 addrtop = (tbi ? 55 : 63); 12830 12831 if (extract64(address, pamax, addrtop - pamax + 1) != 0) { 12832 fi->type = ARMFault_AddressSize; 12833 fi->level = 0; 12834 fi->stage2 = false; 12835 return 1; 12836 } 12837 12838 /* 12839 * When TBI is disabled, we've just validated that all of the 12840 * bits above PAMax are zero, so logically we only need to 12841 * clear the top byte for TBI. But it's clearer to follow 12842 * the pseudocode set of addrdesc.paddress. 12843 */ 12844 address = extract64(address, 0, 52); 12845 } 12846 } 12847 *phys_ptr = address; 12848 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 12849 *page_size = TARGET_PAGE_SIZE; 12850 12851 /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */ 12852 hcr = arm_hcr_el2_eff(env); 12853 cacheattrs->shareability = 0; 12854 if (hcr & HCR_DC) { 12855 if (hcr & HCR_DCT) { 12856 memattr = 0xf0; /* Tagged, Normal, WB, RWA */ 12857 } else { 12858 memattr = 0xff; /* Normal, WB, RWA */ 12859 } 12860 } else if (access_type == MMU_INST_FETCH) { 12861 if (regime_sctlr(env, mmu_idx) & SCTLR_I) { 12862 memattr = 0xee; /* Normal, WT, RA, NT */ 12863 } else { 12864 memattr = 0x44; /* Normal, NC, No */ 12865 } 12866 cacheattrs->shareability = 2; /* outer sharable */ 12867 } else { 12868 memattr = 0x00; /* Device, nGnRnE */ 12869 } 12870 cacheattrs->attrs = memattr; 12871 return 0; 12872 } 12873 12874 if (regime_using_lpae_format(env, mmu_idx)) { 12875 return get_phys_addr_lpae(env, address, access_type, mmu_idx, false, 12876 phys_ptr, attrs, prot, page_size, 12877 fi, cacheattrs); 12878 } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) { 12879 return get_phys_addr_v6(env, address, access_type, mmu_idx, 12880 phys_ptr, attrs, prot, page_size, fi); 12881 } else { 12882 return get_phys_addr_v5(env, address, access_type, mmu_idx, 12883 phys_ptr, prot, page_size, fi); 12884 } 12885 } 12886 12887 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, 12888 MemTxAttrs *attrs) 12889 { 12890 ARMCPU *cpu = ARM_CPU(cs); 12891 CPUARMState *env = &cpu->env; 12892 hwaddr phys_addr; 12893 target_ulong page_size; 12894 int prot; 12895 bool ret; 12896 ARMMMUFaultInfo fi = {}; 12897 ARMMMUIdx mmu_idx = arm_mmu_idx(env); 12898 ARMCacheAttrs cacheattrs = {}; 12899 12900 *attrs = (MemTxAttrs) {}; 12901 12902 ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &phys_addr, 12903 attrs, &prot, &page_size, &fi, &cacheattrs); 12904 12905 if (ret) { 12906 return -1; 12907 } 12908 return phys_addr; 12909 } 12910 12911 #endif 12912 12913 /* Note that signed overflow is undefined in C. The following routines are 12914 careful to use unsigned types where modulo arithmetic is required. 
12915 Failure to do so _will_ break on newer gcc. */ 12916 12917 /* Signed saturating arithmetic. */ 12918 12919 /* Perform 16-bit signed saturating addition. */ 12920 static inline uint16_t add16_sat(uint16_t a, uint16_t b) 12921 { 12922 uint16_t res; 12923 12924 res = a + b; 12925 if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) { 12926 if (a & 0x8000) 12927 res = 0x8000; 12928 else 12929 res = 0x7fff; 12930 } 12931 return res; 12932 } 12933 12934 /* Perform 8-bit signed saturating addition. */ 12935 static inline uint8_t add8_sat(uint8_t a, uint8_t b) 12936 { 12937 uint8_t res; 12938 12939 res = a + b; 12940 if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) { 12941 if (a & 0x80) 12942 res = 0x80; 12943 else 12944 res = 0x7f; 12945 } 12946 return res; 12947 } 12948 12949 /* Perform 16-bit signed saturating subtraction. */ 12950 static inline uint16_t sub16_sat(uint16_t a, uint16_t b) 12951 { 12952 uint16_t res; 12953 12954 res = a - b; 12955 if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) { 12956 if (a & 0x8000) 12957 res = 0x8000; 12958 else 12959 res = 0x7fff; 12960 } 12961 return res; 12962 } 12963 12964 /* Perform 8-bit signed saturating subtraction. */ 12965 static inline uint8_t sub8_sat(uint8_t a, uint8_t b) 12966 { 12967 uint8_t res; 12968 12969 res = a - b; 12970 if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) { 12971 if (a & 0x80) 12972 res = 0x80; 12973 else 12974 res = 0x7f; 12975 } 12976 return res; 12977 } 12978 12979 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16); 12980 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16); 12981 #define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8); 12982 #define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8); 12983 #define PFX q 12984 12985 #include "op_addsub.h" 12986 12987 /* Unsigned saturating arithmetic. */ 12988 static inline uint16_t add16_usat(uint16_t a, uint16_t b) 12989 { 12990 uint16_t res; 12991 res = a + b; 12992 if (res < a) 12993 res = 0xffff; 12994 return res; 12995 } 12996 12997 static inline uint16_t sub16_usat(uint16_t a, uint16_t b) 12998 { 12999 if (a > b) 13000 return a - b; 13001 else 13002 return 0; 13003 } 13004 13005 static inline uint8_t add8_usat(uint8_t a, uint8_t b) 13006 { 13007 uint8_t res; 13008 res = a + b; 13009 if (res < a) 13010 res = 0xff; 13011 return res; 13012 } 13013 13014 static inline uint8_t sub8_usat(uint8_t a, uint8_t b) 13015 { 13016 if (a > b) 13017 return a - b; 13018 else 13019 return 0; 13020 } 13021 13022 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16); 13023 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16); 13024 #define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8); 13025 #define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8); 13026 #define PFX uq 13027 13028 #include "op_addsub.h" 13029 13030 /* Signed modulo arithmetic. */ 13031 #define SARITH16(a, b, n, op) do { \ 13032 int32_t sum; \ 13033 sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \ 13034 RESULT(sum, n, 16); \ 13035 if (sum >= 0) \ 13036 ge |= 3 << (n * 2); \ 13037 } while(0) 13038 13039 #define SARITH8(a, b, n, op) do { \ 13040 int32_t sum; \ 13041 sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \ 13042 RESULT(sum, n, 8); \ 13043 if (sum >= 0) \ 13044 ge |= 1 << n; \ 13045 } while(0) 13046 13047 13048 #define ADD16(a, b, n) SARITH16(a, b, n, +) 13049 #define SUB16(a, b, n) SARITH16(a, b, n, -) 13050 #define ADD8(a, b, n) SARITH8(a, b, n, +) 13051 #define SUB8(a, b, n) SARITH8(a, b, n, -) 13052 #define PFX s 13053 #define ARITH_GE 13054 13055 #include "op_addsub.h" 13056 13057 /* Unsigned modulo arithmetic. 
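   For example (illustrative): a UADD16 lane computing 0xffff + 0x0001
   yields 0x0000 with both of that lane's GE bits set (carry out), while a
   USUB16 lane computing 0x0001 - 0x0002 wraps and leaves its GE bits clear
   (borrow).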
*/ 13058 #define ADD16(a, b, n) do { \ 13059 uint32_t sum; \ 13060 sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \ 13061 RESULT(sum, n, 16); \ 13062 if ((sum >> 16) == 1) \ 13063 ge |= 3 << (n * 2); \ 13064 } while(0) 13065 13066 #define ADD8(a, b, n) do { \ 13067 uint32_t sum; \ 13068 sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \ 13069 RESULT(sum, n, 8); \ 13070 if ((sum >> 8) == 1) \ 13071 ge |= 1 << n; \ 13072 } while(0) 13073 13074 #define SUB16(a, b, n) do { \ 13075 uint32_t sum; \ 13076 sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \ 13077 RESULT(sum, n, 16); \ 13078 if ((sum >> 16) == 0) \ 13079 ge |= 3 << (n * 2); \ 13080 } while(0) 13081 13082 #define SUB8(a, b, n) do { \ 13083 uint32_t sum; \ 13084 sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \ 13085 RESULT(sum, n, 8); \ 13086 if ((sum >> 8) == 0) \ 13087 ge |= 1 << n; \ 13088 } while(0) 13089 13090 #define PFX u 13091 #define ARITH_GE 13092 13093 #include "op_addsub.h" 13094 13095 /* Halved signed arithmetic. */ 13096 #define ADD16(a, b, n) \ 13097 RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16) 13098 #define SUB16(a, b, n) \ 13099 RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16) 13100 #define ADD8(a, b, n) \ 13101 RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8) 13102 #define SUB8(a, b, n) \ 13103 RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8) 13104 #define PFX sh 13105 13106 #include "op_addsub.h" 13107 13108 /* Halved unsigned arithmetic. */ 13109 #define ADD16(a, b, n) \ 13110 RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16) 13111 #define SUB16(a, b, n) \ 13112 RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16) 13113 #define ADD8(a, b, n) \ 13114 RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8) 13115 #define SUB8(a, b, n) \ 13116 RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8) 13117 #define PFX uh 13118 13119 #include "op_addsub.h" 13120 13121 static inline uint8_t do_usad(uint8_t a, uint8_t b) 13122 { 13123 if (a > b) 13124 return a - b; 13125 else 13126 return b - a; 13127 } 13128 13129 /* Unsigned sum of absolute byte differences. */ 13130 uint32_t HELPER(usad8)(uint32_t a, uint32_t b) 13131 { 13132 uint32_t sum; 13133 sum = do_usad(a, b); 13134 sum += do_usad(a >> 8, b >> 8); 13135 sum += do_usad(a >> 16, b >> 16); 13136 sum += do_usad(a >> 24, b >> 24); 13137 return sum; 13138 } 13139 13140 /* For ARMv6 SEL instruction. */ 13141 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b) 13142 { 13143 uint32_t mask; 13144 13145 mask = 0; 13146 if (flags & 1) 13147 mask |= 0xff; 13148 if (flags & 2) 13149 mask |= 0xff00; 13150 if (flags & 4) 13151 mask |= 0xff0000; 13152 if (flags & 8) 13153 mask |= 0xff000000; 13154 return (a & mask) | (b & ~mask); 13155 } 13156 13157 /* CRC helpers. 13158 * The upper bytes of val (above the number specified by 'bytes') must have 13159 * been zeroed out by the caller. 13160 */ 13161 uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes) 13162 { 13163 uint8_t buf[4]; 13164 13165 stl_le_p(buf, val); 13166 13167 /* zlib crc32 converts the accumulator and output to one's complement. */ 13168 return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff; 13169 } 13170 13171 uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes) 13172 { 13173 uint8_t buf[4]; 13174 13175 stl_le_p(buf, val); 13176 13177 /* Linux crc32c converts the output to one's complement. 
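     * (Unlike HELPER(crc32) above, the accumulator is passed to crc32c()
     * unmodified; only zlib's crc32() needs the accumulator pre-inverted.)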
*/ 13178 return crc32c(acc, buf, bytes) ^ 0xffffffff; 13179 } 13180 13181 /* Return the exception level to which FP-disabled exceptions should 13182 * be taken, or 0 if FP is enabled. 13183 */ 13184 int fp_exception_el(CPUARMState *env, int cur_el) 13185 { 13186 #ifndef CONFIG_USER_ONLY 13187 /* CPACR and the CPTR registers don't exist before v6, so FP is 13188 * always accessible 13189 */ 13190 if (!arm_feature(env, ARM_FEATURE_V6)) { 13191 return 0; 13192 } 13193 13194 if (arm_feature(env, ARM_FEATURE_M)) { 13195 /* CPACR can cause a NOCP UsageFault taken to current security state */ 13196 if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) { 13197 return 1; 13198 } 13199 13200 if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) { 13201 if (!extract32(env->v7m.nsacr, 10, 1)) { 13202 /* FP insns cause a NOCP UsageFault taken to Secure */ 13203 return 3; 13204 } 13205 } 13206 13207 return 0; 13208 } 13209 13210 /* The CPACR controls traps to EL1, or PL1 if we're 32 bit: 13211 * 0, 2 : trap EL0 and EL1/PL1 accesses 13212 * 1 : trap only EL0 accesses 13213 * 3 : trap no accesses 13214 * This register is ignored if E2H+TGE are both set. 13215 */ 13216 if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { 13217 int fpen = extract32(env->cp15.cpacr_el1, 20, 2); 13218 13219 switch (fpen) { 13220 case 0: 13221 case 2: 13222 if (cur_el == 0 || cur_el == 1) { 13223 /* Trap to PL1, which might be EL1 or EL3 */ 13224 if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { 13225 return 3; 13226 } 13227 return 1; 13228 } 13229 if (cur_el == 3 && !is_a64(env)) { 13230 /* Secure PL1 running at EL3 */ 13231 return 3; 13232 } 13233 break; 13234 case 1: 13235 if (cur_el == 0) { 13236 return 1; 13237 } 13238 break; 13239 case 3: 13240 break; 13241 } 13242 } 13243 13244 /* 13245 * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode 13246 * to control non-secure access to the FPU. It doesn't have any 13247 * effect if EL3 is AArch64 or if EL3 doesn't exist at all. 13248 */ 13249 if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 13250 cur_el <= 2 && !arm_is_secure_below_el3(env))) { 13251 if (!extract32(env->cp15.nsacr, 10, 1)) { 13252 /* FP insns act as UNDEF */ 13253 return cur_el == 2 ? 2 : 1; 13254 } 13255 } 13256 13257 /* For the CPTR registers we don't need to guard with an ARM_FEATURE 13258 * check because zero bits in the registers mean "don't trap". 
13259 */ 13260 13261 /* CPTR_EL2 : present in v7VE or v8 */ 13262 if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1) 13263 && arm_is_el2_enabled(env)) { 13264 /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */ 13265 return 2; 13266 } 13267 13268 /* CPTR_EL3 : present in v8 */ 13269 if (extract32(env->cp15.cptr_el[3], 10, 1)) { 13270 /* Trap all FP ops to EL3 */ 13271 return 3; 13272 } 13273 #endif 13274 return 0; 13275 } 13276 13277 /* Return the exception level we're running at if this is our mmu_idx */ 13278 int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx) 13279 { 13280 if (mmu_idx & ARM_MMU_IDX_M) { 13281 return mmu_idx & ARM_MMU_IDX_M_PRIV; 13282 } 13283 13284 switch (mmu_idx) { 13285 case ARMMMUIdx_E10_0: 13286 case ARMMMUIdx_E20_0: 13287 case ARMMMUIdx_SE10_0: 13288 case ARMMMUIdx_SE20_0: 13289 return 0; 13290 case ARMMMUIdx_E10_1: 13291 case ARMMMUIdx_E10_1_PAN: 13292 case ARMMMUIdx_SE10_1: 13293 case ARMMMUIdx_SE10_1_PAN: 13294 return 1; 13295 case ARMMMUIdx_E2: 13296 case ARMMMUIdx_E20_2: 13297 case ARMMMUIdx_E20_2_PAN: 13298 case ARMMMUIdx_SE2: 13299 case ARMMMUIdx_SE20_2: 13300 case ARMMMUIdx_SE20_2_PAN: 13301 return 2; 13302 case ARMMMUIdx_SE3: 13303 return 3; 13304 default: 13305 g_assert_not_reached(); 13306 } 13307 } 13308 13309 #ifndef CONFIG_TCG 13310 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate) 13311 { 13312 g_assert_not_reached(); 13313 } 13314 #endif 13315 13316 ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el) 13317 { 13318 ARMMMUIdx idx; 13319 uint64_t hcr; 13320 13321 if (arm_feature(env, ARM_FEATURE_M)) { 13322 return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure); 13323 } 13324 13325 /* See ARM pseudo-function ELIsInHost. */ 13326 switch (el) { 13327 case 0: 13328 hcr = arm_hcr_el2_eff(env); 13329 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 13330 idx = ARMMMUIdx_E20_0; 13331 } else { 13332 idx = ARMMMUIdx_E10_0; 13333 } 13334 break; 13335 case 1: 13336 if (env->pstate & PSTATE_PAN) { 13337 idx = ARMMMUIdx_E10_1_PAN; 13338 } else { 13339 idx = ARMMMUIdx_E10_1; 13340 } 13341 break; 13342 case 2: 13343 /* Note that TGE does not apply at EL2. 
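         * With HCR_EL2.E2H set, EL2 runs in the EL2&0 regime and so uses
         * the E20 indexes below (including the PAN variant); otherwise it
         * uses the single-range E2 index.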
         */
        if (arm_hcr_el2_eff(env) & HCR_E2H) {
            if (env->pstate & PSTATE_PAN) {
                idx = ARMMMUIdx_E20_2_PAN;
            } else {
                idx = ARMMMUIdx_E20_2;
            }
        } else {
            idx = ARMMMUIdx_E2;
        }
        break;
    case 3:
        return ARMMMUIdx_SE3;
    default:
        g_assert_not_reached();
    }

    if (arm_is_secure_below_el3(env)) {
        idx &= ~ARM_MMU_IDX_A_NS;
    }

    return idx;
}

ARMMMUIdx arm_mmu_idx(CPUARMState *env)
{
    return arm_mmu_idx_el(env, arm_current_el(env));
}

#ifndef CONFIG_USER_ONLY
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}
#endif

static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
                                           ARMMMUIdx mmu_idx,
                                           CPUARMTBFlags flags)
{
    DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
    DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));

    if (arm_singlestep_active(env)) {
        DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
    }
    return flags;
}

static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
                                              ARMMMUIdx mmu_idx,
                                              CPUARMTBFlags flags)
{
    bool sctlr_b = arm_sctlr_b(env);

    if (sctlr_b) {
        DP_TBFLAG_A32(flags, SCTLR__B, 1);
    }
    if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }
    DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    uint32_t ccr = env->v7m.ccr[env->v7m.secure];

    /* Without HaveMainExt, CCR.UNALIGN_TRP is RES1. */
    if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_v7m_is_handler_mode(env)) {
        DP_TBFLAG_M32(flags, HANDLER, 1);
    }

    /*
     * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
     * is suppressing them because the requested execution priority
     * is less than 0.
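     * In other words, checks are skipped only when we are already running
     * at a negative execution priority (e.g. HardFault or NMI, which is
     * what the ARM_MMU_IDX_M_NEGPRI bit in mmu_idx encodes) and the guest
     * has set CCR.STKOFHFNMIGN.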
13428 */ 13429 if (arm_feature(env, ARM_FEATURE_V8) && 13430 !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) && 13431 (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) { 13432 DP_TBFLAG_M32(flags, STACKCHECK, 1); 13433 } 13434 13435 return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags); 13436 } 13437 13438 static CPUARMTBFlags rebuild_hflags_aprofile(CPUARMState *env) 13439 { 13440 CPUARMTBFlags flags = {}; 13441 13442 DP_TBFLAG_ANY(flags, DEBUG_TARGET_EL, arm_debug_target_el(env)); 13443 return flags; 13444 } 13445 13446 static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el, 13447 ARMMMUIdx mmu_idx) 13448 { 13449 CPUARMTBFlags flags = rebuild_hflags_aprofile(env); 13450 int el = arm_current_el(env); 13451 13452 if (arm_sctlr(env, el) & SCTLR_A) { 13453 DP_TBFLAG_ANY(flags, ALIGN_MEM, 1); 13454 } 13455 13456 if (arm_el_is_aa64(env, 1)) { 13457 DP_TBFLAG_A32(flags, VFPEN, 1); 13458 } 13459 13460 if (el < 2 && env->cp15.hstr_el2 && 13461 (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { 13462 DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1); 13463 } 13464 13465 if (env->uncached_cpsr & CPSR_IL) { 13466 DP_TBFLAG_ANY(flags, PSTATE__IL, 1); 13467 } 13468 13469 return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags); 13470 } 13471 13472 static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, 13473 ARMMMUIdx mmu_idx) 13474 { 13475 CPUARMTBFlags flags = rebuild_hflags_aprofile(env); 13476 ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx); 13477 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; 13478 uint64_t sctlr; 13479 int tbii, tbid; 13480 13481 DP_TBFLAG_ANY(flags, AARCH64_STATE, 1); 13482 13483 /* Get control bits for tagged addresses. */ 13484 tbid = aa64_va_parameter_tbi(tcr, mmu_idx); 13485 tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx); 13486 13487 DP_TBFLAG_A64(flags, TBII, tbii); 13488 DP_TBFLAG_A64(flags, TBID, tbid); 13489 13490 if (cpu_isar_feature(aa64_sve, env_archcpu(env))) { 13491 int sve_el = sve_exception_el(env, el); 13492 uint32_t zcr_len; 13493 13494 /* 13495 * If SVE is disabled, but FP is enabled, 13496 * then the effective len is 0. 13497 */ 13498 if (sve_el != 0 && fp_el == 0) { 13499 zcr_len = 0; 13500 } else { 13501 zcr_len = sve_zcr_len_for_el(env, el); 13502 } 13503 DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el); 13504 DP_TBFLAG_A64(flags, ZCR_LEN, zcr_len); 13505 } 13506 13507 sctlr = regime_sctlr(env, stage1); 13508 13509 if (sctlr & SCTLR_A) { 13510 DP_TBFLAG_ANY(flags, ALIGN_MEM, 1); 13511 } 13512 13513 if (arm_cpu_data_is_big_endian_a64(el, sctlr)) { 13514 DP_TBFLAG_ANY(flags, BE_DATA, 1); 13515 } 13516 13517 if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) { 13518 /* 13519 * In order to save space in flags, we record only whether 13520 * pauth is "inactive", meaning all insns are implemented as 13521 * a nop, or "active" when some action must be performed. 13522 * The decision of which action to take is left to a helper. 13523 */ 13524 if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) { 13525 DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1); 13526 } 13527 } 13528 13529 if (cpu_isar_feature(aa64_bti, env_archcpu(env))) { 13530 /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */ 13531 if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) { 13532 DP_TBFLAG_A64(flags, BT, 1); 13533 } 13534 } 13535 13536 /* Compute the condition for using AccType_UNPRIV for LDTR et al. 
     */
    if (!(env->pstate & PSTATE_UAO)) {
        switch (mmu_idx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
        case ARMMMUIdx_SE10_1:
        case ARMMMUIdx_SE10_1_PAN:
            /* TODO: ARMv8.3-NV */
            DP_TBFLAG_A64(flags, UNPRIV, 1);
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
        case ARMMMUIdx_SE20_2:
        case ARMMMUIdx_SE20_2_PAN:
            /*
             * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
             * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
             */
            if (env->cp15.hcr_el2 & HCR_TGE) {
                DP_TBFLAG_A64(flags, UNPRIV, 1);
            }
            break;
        default:
            break;
        }
    }

    if (env->pstate & PSTATE_IL) {
        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
    }

    if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
        /*
         * Set MTE_ACTIVE if any access may be Checked, and leave clear
         * if all accesses must be Unchecked:
         * 1) If no TBI, then there are no tags in the address to check,
         * 2) If Tag Check Override, then all accesses are Unchecked,
         * 3) If Tag Check Fail == 0, then Checked accesses have no effect,
         * 4) If no Allocation Tag Access, then all accesses are Unchecked.
         */
        if (allocation_tag_access_enabled(env, el, sctlr)) {
            DP_TBFLAG_A64(flags, ATA, 1);
            if (tbid
                && !(env->pstate & PSTATE_TCO)
                && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
                DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
            }
        }
        /* And again for unprivileged accesses, if required. */
        if (EX_TBFLAG_A64(flags, UNPRIV)
            && tbid
            && !(env->pstate & PSTATE_TCO)
            && (sctlr & SCTLR_TCF0)
            && allocation_tag_access_enabled(env, 0, sctlr)) {
            DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
        }
        /* Cache TCMA as well as TBI. */
        DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
    }

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    if (is_a64(env)) {
        return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
    } else if (arm_feature(env, ARM_FEATURE_M)) {
        return rebuild_hflags_m32(env, fp_el, mmu_idx);
    } else {
        return rebuild_hflags_a32(env, fp_el, mmu_idx);
    }
}

void arm_rebuild_hflags(CPUARMState *env)
{
    env->hflags = rebuild_hflags_internal(env);
}

/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us; we need to recompute.
 */
void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us; we need to recompute.
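 * (That is why this "newel" helper, like the m32 one above, looks up
 * arm_current_el() itself instead of being passed the EL.)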
13643 */ 13644 void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env) 13645 { 13646 int el = arm_current_el(env); 13647 int fp_el = fp_exception_el(env, el); 13648 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); 13649 env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx); 13650 } 13651 13652 void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el) 13653 { 13654 int fp_el = fp_exception_el(env, el); 13655 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); 13656 13657 env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx); 13658 } 13659 13660 void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el) 13661 { 13662 int fp_el = fp_exception_el(env, el); 13663 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); 13664 13665 env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx); 13666 } 13667 13668 static inline void assert_hflags_rebuild_correctly(CPUARMState *env) 13669 { 13670 #ifdef CONFIG_DEBUG_TCG 13671 CPUARMTBFlags c = env->hflags; 13672 CPUARMTBFlags r = rebuild_hflags_internal(env); 13673 13674 if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) { 13675 fprintf(stderr, "TCG hflags mismatch " 13676 "(current:(0x%08x,0x" TARGET_FMT_lx ")" 13677 " rebuilt:(0x%08x,0x" TARGET_FMT_lx ")\n", 13678 c.flags, c.flags2, r.flags, r.flags2); 13679 abort(); 13680 } 13681 #endif 13682 } 13683 13684 void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, 13685 target_ulong *cs_base, uint32_t *pflags) 13686 { 13687 CPUARMTBFlags flags; 13688 13689 assert_hflags_rebuild_correctly(env); 13690 flags = env->hflags; 13691 13692 if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) { 13693 *pc = env->pc; 13694 if (cpu_isar_feature(aa64_bti, env_archcpu(env))) { 13695 DP_TBFLAG_A64(flags, BTYPE, env->btype); 13696 } 13697 } else { 13698 *pc = env->regs[15]; 13699 13700 if (arm_feature(env, ARM_FEATURE_M)) { 13701 if (arm_feature(env, ARM_FEATURE_M_SECURITY) && 13702 FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S) 13703 != env->v7m.secure) { 13704 DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1); 13705 } 13706 13707 if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) && 13708 (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) || 13709 (env->v7m.secure && 13710 !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) { 13711 /* 13712 * ASPEN is set, but FPCA/SFPA indicate that there is no 13713 * active FP context; we must create a new FP context before 13714 * executing any FP insn. 13715 */ 13716 DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1); 13717 } 13718 13719 bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK; 13720 if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) { 13721 DP_TBFLAG_M32(flags, LSPACT, 1); 13722 } 13723 } else { 13724 /* 13725 * Note that XSCALE_CPAR shares bits with VECSTRIDE. 13726 * Note that VECLEN+VECSTRIDE are RES0 for M-profile. 
13727 */ 13728 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 13729 DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar); 13730 } else { 13731 DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len); 13732 DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride); 13733 } 13734 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) { 13735 DP_TBFLAG_A32(flags, VFPEN, 1); 13736 } 13737 } 13738 13739 DP_TBFLAG_AM32(flags, THUMB, env->thumb); 13740 DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits); 13741 } 13742 13743 /* 13744 * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine 13745 * states defined in the ARM ARM for software singlestep: 13746 * SS_ACTIVE PSTATE.SS State 13747 * 0 x Inactive (the TB flag for SS is always 0) 13748 * 1 0 Active-pending 13749 * 1 1 Active-not-pending 13750 * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB. 13751 */ 13752 if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) { 13753 DP_TBFLAG_ANY(flags, PSTATE__SS, 1); 13754 } 13755 13756 *pflags = flags.flags; 13757 *cs_base = flags.flags2; 13758 } 13759 13760 #ifdef TARGET_AARCH64 13761 /* 13762 * The manual says that when SVE is enabled and VQ is widened the 13763 * implementation is allowed to zero the previously inaccessible 13764 * portion of the registers. The corollary to that is that when 13765 * SVE is enabled and VQ is narrowed we are also allowed to zero 13766 * the now inaccessible portion of the registers. 13767 * 13768 * The intent of this is that no predicate bit beyond VQ is ever set. 13769 * Which means that some operations on predicate registers themselves 13770 * may operate on full uint64_t or even unrolled across the maximum 13771 * uint64_t[4]. Performing 4 bits of host arithmetic unconditionally 13772 * may well be cheaper than conditionals to restrict the operation 13773 * to the relevant portion of a uint16_t[16]. 13774 */ 13775 void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) 13776 { 13777 int i, j; 13778 uint64_t pmask; 13779 13780 assert(vq >= 1 && vq <= ARM_MAX_VQ); 13781 assert(vq <= env_archcpu(env)->sve_max_vq); 13782 13783 /* Zap the high bits of the zregs. */ 13784 for (i = 0; i < 32; i++) { 13785 memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq)); 13786 } 13787 13788 /* Zap the high bits of the pregs and ffr. */ 13789 pmask = 0; 13790 if (vq & 3) { 13791 pmask = ~(-1ULL << (16 * (vq & 3))); 13792 } 13793 for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) { 13794 for (i = 0; i < 17; ++i) { 13795 env->vfp.pregs[i].p[j] &= pmask; 13796 } 13797 pmask = 0; 13798 } 13799 } 13800 13801 /* 13802 * Notice a change in SVE vector size when changing EL. 13803 */ 13804 void aarch64_sve_change_el(CPUARMState *env, int old_el, 13805 int new_el, bool el0_a64) 13806 { 13807 ARMCPU *cpu = env_archcpu(env); 13808 int old_len, new_len; 13809 bool old_a64, new_a64; 13810 13811 /* Nothing to do if no SVE. */ 13812 if (!cpu_isar_feature(aa64_sve, cpu)) { 13813 return; 13814 } 13815 13816 /* Nothing to do if FP is disabled in either EL. */ 13817 if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) { 13818 return; 13819 } 13820 13821 /* 13822 * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped 13823 * at ELx, or not available because the EL is in AArch32 state, then 13824 * for all purposes other than a direct read, the ZCR_ELx.LEN field 13825 * has an effective value of 0". 13826 * 13827 * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0). 
     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
     * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
     * we already have the correct register contents when encountering the
     * vq0->vq0 transition between EL0->EL1.
     */
    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
    old_len = (old_a64 && !sve_exception_el(env, old_el)
               ? sve_zcr_len_for_el(env, old_el) : 0);
    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
    new_len = (new_a64 && !sve_exception_el(env, new_el)
               ? sve_zcr_len_for_el(env, new_el) : 0);

    /* When changing vector length, clear inaccessible state. */
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}
#endif