/*
 * ARM implementation of KVM hooks, 64 bit specific code
 *
 * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
 * Copyright Alex Bennée 2014, Linaro
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/ptrace.h>

#include <linux/elf.h>
#include <linux/kvm.h>

#include "qapi/error.h"
#include "cpu.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "exec/gdbstub.h"
#include "sysemu/runstate.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "kvm_arm.h"
#include "internals.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/ghes.h"
#include "hw/arm/virt.h"

static bool have_guest_debug;

/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable as most of the time debugging kernels you
 * never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/* The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
int max_hw_bps, max_hw_wps;
GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps   (hw_watchpoints->len)
#define cur_hw_bps   (hw_breakpoints->len)
#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i))

/**
 * kvm_arm_init_debug() - check for guest debug capabilities
 * @cs: CPUState
 *
 * kvm_check_extension returns the number of debug registers we have
 * or 0 if we have none.
 */
static void kvm_arm_init_debug(CPUState *cs)
{
    have_guest_debug = kvm_check_extension(cs->kvm_state,
                                           KVM_CAP_SET_GUEST_DEBUG);

    max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS);
    hw_watchpoints = g_array_sized_new(true, true,
                                       sizeof(HWWatchpoint), max_hw_wps);

    max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS);
    hw_breakpoints = g_array_sized_new(true, true,
                                       sizeof(HWBreakpoint), max_hw_bps);
}

/**
 * insert_hw_breakpoint()
 * @addr: address of breakpoint
 *
 * See ARM ARM D2.9.1 for details but here we are only going to create
 * simple un-linked breakpoints (i.e. we don't chain breakpoints
 * together to match address and context or vmid). The hardware is
 * capable of fancier matching but that will require exposing that
 * fanciness to GDB's interface.
 *
 * DBGBCR<n>_EL1, Debug Breakpoint Control Registers
 *
 *  31  24 23  20 19   16 15 14  13  12   9 8   5 4    3 2   1  0
 * +------+------+-------+-----+----+------+-----+------+-----+---+
 * | RES0 |  BT  |  LBN  | SSC | HMC| RES0 | BAS | RES0 | PMC | E |
 * +------+------+-------+-----+----+------+-----+------+-----+---+
 *
 * BT: Breakpoint type (0 = unlinked address match)
 * LBN: Linked BP number (0 = unused)
 * SSC/HMC/PMC: Security, Higher and Priv access control (Table D-12)
 * BAS: Byte Address Select (RES1 for AArch64)
 * E: Enable bit
 *
 * DBGBVR<n>_EL1, Debug Breakpoint Value Registers
 *
 *  63  53 52       49 48       2  1 0
 * +------+-----------+----------+-----+
 * | RESS | VA[52:49] | VA[48:2] | 0 0 |
 * +------+-----------+----------+-----+
 *
 * Depending on the addressing mode bits the top bits of the register
 * are a sign extension of the highest applicable VA bit. Some
 * versions of GDB don't do it correctly so we ensure they are correct
 * here so future PC comparisons will work properly.
 */

static int insert_hw_breakpoint(target_ulong addr)
{
    HWBreakpoint brk = {
        .bcr = 0x1,                     /* BCR E=1, enable */
        .bvr = sextract64(addr, 0, 53)
    };

    if (cur_hw_bps >= max_hw_bps) {
        return -ENOBUFS;
    }

    brk.bcr = deposit32(brk.bcr, 1, 2, 0x3);   /* PMC = 11 */
    brk.bcr = deposit32(brk.bcr, 5, 4, 0xf);   /* BAS = RES1 */

    g_array_append_val(hw_breakpoints, brk);

    return 0;
}

/**
 * delete_hw_breakpoint()
 * @pc: address of breakpoint
 *
 * Delete a breakpoint and shuffle any above down
 */

static int delete_hw_breakpoint(target_ulong pc)
{
    int i;
    for (i = 0; i < hw_breakpoints->len; i++) {
        HWBreakpoint *brk = get_hw_bp(i);
        if (brk->bvr == pc) {
            g_array_remove_index(hw_breakpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}

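/*
 * Worked example of the encoding above (values derived from the
 * deposit32() calls in insert_hw_breakpoint(), for illustration
 * only): every breakpoint is programmed with BCR = 0x1e7, i.e. E=1,
 * PMC=0b11 (match at EL1 and EL0) and BAS=0xf, while BVR holds the
 * address sign-extended from bit 52. For a typical kernel PC such as
 * 0xffff800010000000 the sign extension reproduces the address
 * unchanged, so the bvr == pc comparison in delete_hw_breakpoint()
 * and find_hw_breakpoint() works directly.
 */
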
/**
 * insert_hw_watchpoint()
 * @addr: address of watch point
 * @len: size of area
 * @type: type of watch point
 *
 * See ARM ARM D2.10. As with the breakpoints we can do some advanced
 * stuff if we want to. The watch points can be linked with the break
 * points above to make them context aware. However for simplicity
 * currently we only deal with simple read/write watch points.
 *
 * D7.3.11 DBGWCR<n>_EL1, Debug Watchpoint Control Registers
 *
 *  31  29 28   24 23  21 20 19 16 15 14  13   12  5 4   3 2   1  0
 * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
 * | RES0 |  MASK | RES0 | WT | LBN | SSC | HMC | BAS | LSC | PAC | E |
 * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
 *
 * MASK: num bits addr mask (0=none,01/10=res,11=3 bits (8 bytes))
 * WT: 0 - unlinked, 1 - linked (not currently used)
 * LBN: Linked BP number (not currently used)
 * SSC/HMC/PAC: Security, Higher and Priv access control (Table D2-11)
 * BAS: Byte Address Select
 * LSC: Load/Store control (01: load, 10: store, 11: both)
 * E: Enable
 *
 * The bottom 2 bits of the value register are masked. Therefore to
 * break on any sizes smaller than an unaligned word you need to set
 * MASK=0, BAS=bit per byte in question. For larger regions (powers
 * of 2) you need to ensure you mask the address as required and set
 * BAS=0xff.
 */

static int insert_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    HWWatchpoint wp = {
        .wcr = R_DBGWCR_E_MASK, /* E=1, enable */
        .wvr = addr & (~0x7ULL),
        .details = { .vaddr = addr, .len = len }
    };

    if (cur_hw_wps >= max_hw_wps) {
        return -ENOBUFS;
    }

    /*
     * HMC=0 SSC=0 PAC=3 will hit EL0 or EL1, any security state,
     * valid whether EL3 is implemented or not
     */
    wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, PAC, 3);

    switch (type) {
    case GDB_WATCHPOINT_READ:
        wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, LSC, 1);
        wp.details.flags = BP_MEM_READ;
        break;
    case GDB_WATCHPOINT_WRITE:
        wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, LSC, 2);
        wp.details.flags = BP_MEM_WRITE;
        break;
    case GDB_WATCHPOINT_ACCESS:
        wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, LSC, 3);
        wp.details.flags = BP_MEM_ACCESS;
        break;
    default:
        g_assert_not_reached();
    }
    if (len <= 8) {
        /* we align the address and set the bits in BAS */
        int off = addr & 0x7;
        int bas = (1 << len) - 1;

        wp.wcr = deposit32(wp.wcr, 5 + off, 8 - off, bas);
    } else {
        /* For ranges above 8 bytes we need to be a power of 2 */
        if (is_power_of_2(len)) {
            int bits = ctz64(len);

            wp.wvr &= ~((1 << bits) - 1);
            wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, MASK, bits);
            wp.wcr = FIELD_DP64(wp.wcr, DBGWCR, BAS, 0xff);
        } else {
            return -ENOBUFS;
        }
    }

    g_array_append_val(hw_watchpoints, wp);
    return 0;
}

static bool check_watchpoint_in_range(int i, target_ulong addr)
{
    HWWatchpoint *wp = get_hw_wp(i);
    uint64_t addr_top, addr_bottom = wp->wvr;
    int bas = extract32(wp->wcr, 5, 8);
    int mask = extract32(wp->wcr, 24, 4);

    if (mask) {
        addr_top = addr_bottom + (1 << mask);
    } else {
        /*
         * BAS must be contiguous, so its population count gives the
         * length of the watched range and its trailing zeros the
         * offset against the (doubleword aligned) base address in
         * DBGWVR.
         */
        addr_bottom += ctz32(bas);
        addr_top = addr_bottom + ctpop32(bas) - 1;
    }

    if (addr >= addr_bottom && addr <= addr_top) {
        return true;
    }

    return false;
}

/**
 * delete_hw_watchpoint()
 * @addr: address of watchpoint
 *
 * Delete a watchpoint and shuffle any above down
 */

static int delete_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    int i;
    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            g_array_remove_index(hw_watchpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return insert_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return insert_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return delete_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return delete_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

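/*
 * Worked example of the BAS encoding used above (illustrative values,
 * derived from insert_hw_watchpoint()): a 4-byte watchpoint at
 * 0x...1004 is programmed as WVR = 0x...1000 (the doubleword-aligned
 * base) with BAS = 0xf0, i.e. bytes 4-7 of the doubleword selected.
 * A 64-byte watchpoint instead takes the MASK path: MASK = 6, WVR
 * masked down to a 64-byte boundary and BAS = 0xff.
 */
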
void kvm_arch_remove_all_hw_breakpoints(void)
{
    if (cur_hw_wps > 0) {
        g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
    }
    if (cur_hw_bps > 0) {
        g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
    }
}

void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
{
    int i;
    memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));

    for (i = 0; i < max_hw_wps; i++) {
        HWWatchpoint *wp = get_hw_wp(i);
        ptr->dbg_wcr[i] = wp->wcr;
        ptr->dbg_wvr[i] = wp->wvr;
    }
    for (i = 0; i < max_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        ptr->dbg_bcr[i] = bp->bcr;
        ptr->dbg_bvr[i] = bp->bvr;
    }
}

bool kvm_arm_hw_debug_active(CPUState *cs)
{
    return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
}

static bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
{
    int i;

    for (i = 0; i < cur_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        if (bp->bvr == pc) {
            return true;
        }
    }
    return false;
}

static CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr)
{
    int i;

    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            return &get_hw_wp(i)->details;
        }
    }
    return NULL;
}

static bool kvm_arm_set_device_attr(CPUState *cs, struct kvm_device_attr *attr,
                                    const char *name)
{
    int err;

    err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("%s: KVM_HAS_DEVICE_ATTR: %s", name, strerror(-err));
        return false;
    }

    err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("%s: KVM_SET_DEVICE_ATTR: %s", name, strerror(-err));
        return false;
    }

    return true;
}

void kvm_arm_pmu_init(CPUState *cs)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .attr = KVM_ARM_VCPU_PMU_V3_INIT,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) {
        error_report("failed to init PMU");
        abort();
    }
}

void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .addr = (intptr_t)&irq,
        .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) {
        error_report("failed to set irq for PMU");
        abort();
    }
}

void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PVTIME_CTRL,
        .attr = KVM_ARM_VCPU_PVTIME_IPA,
        .addr = (uint64_t)&ipa,
    };

    if (ARM_CPU(cs)->kvm_steal_time == ON_OFF_AUTO_OFF) {
        return;
    }
    if (!kvm_arm_set_device_attr(cs, &attr, "PVTIME IPA")) {
        error_report("failed to init PVTIME IPA");
        abort();
    }
}

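/*
 * Helpers for reading ID registers from a (scratch) vCPU file
 * descriptor. Note that KVM exposes every AArch64 system register,
 * including the 32-bit AArch32 ID registers, as a 64-bit value, which
 * is why read_sys_reg32() below reads into a uint64_t and truncates,
 * and why both helpers assert that the register ID encodes a 64-bit
 * size.
 */
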
static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id)
{
    uint64_t ret;
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret };
    int err;

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    err = ioctl(fd, KVM_GET_ONE_REG, &idreg);
    if (err < 0) {
        return -1;
    }
    *pret = ret;
    return 0;
}

static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id)
{
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret };

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    return ioctl(fd, KVM_GET_ONE_REG, &idreg);
}

static bool kvm_arm_pauth_supported(void)
{
    return (kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_ADDRESS) &&
            kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_GENERIC));
}

bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUFeatures fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int fdarray[3];
    bool sve_supported;
    bool pmu_supported = false;
    uint64_t features = 0;
    uint64_t t;
    int err;

    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type. Fortunately these old kernels
     * support only a very limited number of CPUs.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    /*
     * target = -1 informs kvm_arm_create_scratch_host_vcpu()
     * to use the preferred target
     */
    struct kvm_vcpu_init init = { .target = -1, };

    /*
     * Ask for Pointer Authentication if supported. We can't play the
     * SVE trick of synthesising the ID reg as KVM won't tell us
     * whether we have the architected or IMPDEF version of PAuth, so
     * we have to use the actual ID regs.
     */
    if (kvm_arm_pauth_supported()) {
        init.features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS |
                             1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
    }

    if (kvm_arm_pmu_supported()) {
        init.features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
        pmu_supported = true;
    }

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcf->target = init.target;
    ahcf->dtb_compatible = "arm,arm-v8";

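    /*
     * The ARM64_SYS_REG() arguments below are the system register's
     * (op0, op1, CRn, CRm, op2) encoding; for example (3, 0, 0, 4, 0)
     * is ID_AA64PFR0_EL1. All reads go through fdarray[2], the
     * scratch vCPU's file descriptor.
     */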
    err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
                         ARM64_SYS_REG(3, 0, 0, 4, 0));
    if (unlikely(err < 0)) {
        /*
         * Before v4.15, the kernel only exposed a limited number of system
         * registers, not including any of the interesting AArch64 ID regs.
         * For the most part we could leave these fields as zero with minimal
         * effect, since this does not affect the values seen by the guest.
         *
         * However, it could cause problems down the line for QEMU,
         * so provide a minimal v8.0 default.
         *
         * ??? Could read MIDR and use knowledge from cpu64.c.
         * ??? Could map a page of memory into our temp guest and
         *     run the tiniest of hand-crafted kernels to extract
         *     the values seen by the guest.
         * ??? Either of these sounds like too much effort just
         *     to work around running a modern host kernel.
         */
        ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */
        err = 0;
    } else {
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
                              ARM64_SYS_REG(3, 0, 0, 4, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0,
                              ARM64_SYS_REG(3, 0, 0, 5, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1,
                              ARM64_SYS_REG(3, 0, 0, 5, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
                              ARM64_SYS_REG(3, 0, 0, 6, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
                              ARM64_SYS_REG(3, 0, 0, 6, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
                              ARM64_SYS_REG(3, 0, 0, 7, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
                              ARM64_SYS_REG(3, 0, 0, 7, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2,
                              ARM64_SYS_REG(3, 0, 0, 7, 2));

        /*
         * Note that if AArch32 support is not present in the host,
         * the AArch32 sysregs are present to be read, but will
         * return UNKNOWN values. This is neither better nor worse
         * than skipping the reads and leaving 0, as we must avoid
         * considering the values in every case.
         */
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1,
                              ARM64_SYS_REG(3, 0, 0, 1, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2,
                              ARM64_SYS_REG(3, 0, 0, 3, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1,
                              ARM64_SYS_REG(3, 0, 0, 1, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2,
                              ARM64_SYS_REG(3, 0, 0, 1, 6));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3,
                              ARM64_SYS_REG(3, 0, 0, 1, 7));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
                              ARM64_SYS_REG(3, 0, 0, 2, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
                              ARM64_SYS_REG(3, 0, 0, 2, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
                              ARM64_SYS_REG(3, 0, 0, 2, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
                              ARM64_SYS_REG(3, 0, 0, 2, 3));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
                              ARM64_SYS_REG(3, 0, 0, 2, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
                              ARM64_SYS_REG(3, 0, 0, 2, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4,
                              ARM64_SYS_REG(3, 0, 0, 2, 6));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
                              ARM64_SYS_REG(3, 0, 0, 2, 7));

        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
                              ARM64_SYS_REG(3, 0, 0, 3, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
                              ARM64_SYS_REG(3, 0, 0, 3, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
                              ARM64_SYS_REG(3, 0, 0, 3, 2));

        /*
         * DBGDIDR is a bit complicated because the kernel doesn't
         * provide an accessor for it in 64-bit mode, which is what this
         * scratch VM is in, and there's no architected "64-bit sysreg
         * which reads the same as the 32-bit register" the way there is
         * for other ID registers. Instead we synthesize a value from the
         * AArch64 ID_AA64DFR0, the same way the kernel code in
         * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does.
         * We only do this if the CPU supports AArch32 at EL1.
         */
        if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) {
            int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS);
            int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS);
            int ctx_cmps =
                FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS);
            int version = 6; /* ARMv8 debug architecture */
            bool has_el3 =
                !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3);
            uint32_t dbgdidr = 0;

            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, BRPS, brps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, CTX_CMPS, ctx_cmps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, VERSION, version);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3);
            dbgdidr |= (1 << 15); /* RES1 bit */
            ahcf->isar.dbgdidr = dbgdidr;
        }

        if (pmu_supported) {
            /* PMCR_EL0 is only accessible if the vCPU has feature PMU_V3 */
            err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0,
                                  ARM64_SYS_REG(3, 3, 9, 12, 0));
        }
    }

    sve_supported = ioctl(fdarray[0], KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0;

    /* Add feature bits that can't appear until after VCPU init. */
    if (sve_supported) {
        t = ahcf->isar.id_aa64pfr0;
        t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
        ahcf->isar.id_aa64pfr0 = t;

        /*
         * Before v5.1, KVM did not support SVE and did not expose
         * ID_AA64ZFR0_EL1 even as RAZ. After v5.1, KVM still does
         * not expose the register to "user" requests like this
         * unless the host supports SVE.
         */
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
                              ARM64_SYS_REG(3, 0, 0, 4, 4));
    }

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (err < 0) {
        return false;
    }

    /*
     * We can assume any KVM supporting CPU is at least a v8
     * with VFPv4+Neon; this in turn implies most of the other
     * feature bits.
     */
    features |= 1ULL << ARM_FEATURE_V8;
    features |= 1ULL << ARM_FEATURE_NEON;
    features |= 1ULL << ARM_FEATURE_AARCH64;
    features |= 1ULL << ARM_FEATURE_PMU;
    features |= 1ULL << ARM_FEATURE_GENERIC_TIMER;

    ahcf->features = features;

    return true;
}

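/*
 * kvm-steal-time is a tri-state CPU property (hence the ON_OFF_AUTO
 * checks below): AUTO enables it whenever both host and guest can
 * support it, while an explicit ON is a hard error if they cannot.
 * For illustration, a command line such as "-cpu host,kvm-steal-time=on"
 * would take the ON_OFF_AUTO_ON paths below.
 */
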
void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp)
{
    bool has_steal_time = kvm_arm_steal_time_supported();

    if (cpu->kvm_steal_time == ON_OFF_AUTO_AUTO) {
        if (!has_steal_time || !arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
            cpu->kvm_steal_time = ON_OFF_AUTO_OFF;
        } else {
            cpu->kvm_steal_time = ON_OFF_AUTO_ON;
        }
    } else if (cpu->kvm_steal_time == ON_OFF_AUTO_ON) {
        if (!has_steal_time) {
            error_setg(errp, "'kvm-steal-time' cannot be enabled "
                             "on this host");
            return;
        } else if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
            /*
             * DEN0057A chapter 2 says "This specification only covers
             * systems in which the Execution state of the hypervisor
             * as well as EL1 of virtual machines is AArch64.". And,
             * to ensure that, the smc/hvc calls are only specified as
             * smc64/hvc64.
             */
            error_setg(errp, "'kvm-steal-time' cannot be enabled "
                             "for AArch32 guests");
            return;
        }
    }
}

bool kvm_arm_aarch32_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT);
}

bool kvm_arm_sve_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE);
}

bool kvm_arm_steal_time_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME);
}

QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);

void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map)
{
    /* Only call this function if kvm_arm_sve_supported() returns true. */
    static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS];
    static bool probed;
    uint32_t vq = 0;
    int i, j;

    bitmap_zero(map, ARM_MAX_VQ);

    /*
     * KVM ensures all host CPUs support the same set of vector lengths.
     * So we only need to create the scratch VCPUs once and then cache
     * the results.
     */
    if (!probed) {
        struct kvm_vcpu_init init = {
            .target = -1,
            .features[0] = (1 << KVM_ARM_VCPU_SVE),
        };
        struct kvm_one_reg reg = {
            .id = KVM_REG_ARM64_SVE_VLS,
            .addr = (uint64_t)&vls[0],
        };
        int fdarray[3], ret;

        probed = true;

        if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
            error_report("failed to create scratch VCPU with SVE enabled");
            abort();
        }
        ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &reg);
        kvm_arm_destroy_scratch_host_vcpu(fdarray);
        if (ret) {
            error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s",
                         strerror(errno));
            abort();
        }

        for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) {
            if (vls[i]) {
                vq = 64 - clz64(vls[i]) + i * 64;
                break;
            }
        }
        if (vq > ARM_MAX_VQ) {
            warn_report("KVM supports vector lengths larger than "
                        "QEMU can enable");
        }
    }

    for (i = 0; i < KVM_ARM64_SVE_VLS_WORDS; ++i) {
        if (!vls[i]) {
            continue;
        }
        for (j = 1; j <= 64; ++j) {
            vq = j + i * 64;
            if (vq > ARM_MAX_VQ) {
                return;
            }
            if (vls[i] & (1UL << (j - 1))) {
                set_bit(vq - 1, map);
            }
        }
    }
}

static int kvm_arm_sve_set_vls(CPUState *cs)
{
    uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = {0};
    struct kvm_one_reg reg = {
        .id = KVM_REG_ARM64_SVE_VLS,
        .addr = (uint64_t)&vls[0],
    };
    ARMCPU *cpu = ARM_CPU(cs);
    uint32_t vq;
    int i, j;

    assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX);

    for (vq = 1; vq <= cpu->sve_max_vq; ++vq) {
        if (test_bit(vq - 1, cpu->sve_vq_map)) {
            i = (vq - 1) / 64;
            j = (vq - 1) % 64;
            vls[i] |= 1UL << j;
        }
    }

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}

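/*
 * Worked example of the KVM_REG_ARM64_SVE_VLS encoding used above:
 * vq is the vector length in 128-bit quadwords, and vq N is reported
 * as bit (N - 1) % 64 of word (N - 1) / 64. A host supporting 128,
 * 256 and 512-bit vectors (vq = 1, 2, 4) would therefore present
 * vls[0] = 0xb with the remaining words zero. (Illustrative values,
 * derived from the loops above.)
 */
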
#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5

int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t mpidr;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint64_t psciver;

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        error_report("KVM is not supported for this guest CPU type");
        return -EINVAL;
    }

    qemu_add_vm_change_state_handler(kvm_arm_vm_state_change, cs);

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cs->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = QEMU_PSCI_VERSION_0_2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }
    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    }
    if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
        cpu->has_pmu = false;
    }
    if (cpu->has_pmu) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
    } else {
        env->features &= ~(1ULL << ARM_FEATURE_PMU);
    }
    if (cpu_isar_feature(aa64_sve, cpu)) {
        assert(kvm_arm_sve_supported());
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE;
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        cpu->kvm_init_features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS |
                                      1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arm_sve_set_vls(cs);
        if (ret) {
            return ret;
        }
        ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_SVE);
        if (ret) {
            return ret;
        }
    }

    /*
     * KVM reports the exact PSCI version it is implementing via a
     * special sysreg. If it is present, use its contents to determine
     * what to report to the guest in the dtb (it is the PSCI version,
     * in the same 15-bits major 16-bits minor format that PSCI_VERSION
     * returns).
     */
    if (!kvm_get_one_reg(cs, KVM_REG_ARM_PSCI_VERSION, &psciver)) {
        cpu->psci_version = psciver;
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;

    kvm_arm_init_debug(cs);

    /* Check whether user space can specify guest syndrome value */
    kvm_arm_init_serror_injection(cs);

    return kvm_arm_init_cpreg_list(cpu);
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (i.e. is not a core or sve reg that
     * we sync by hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
    case KVM_REG_ARM64_SVE:
        return false;
    default:
        return true;
    }
}

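/*
 * For illustration: a sysreg such as TTBR0_EL1 falls into the default
 * case above and is synchronized through the cpreg list, whereas the
 * general-purpose registers (KVM_REG_ARM_CORE) and the SVE vector
 * registers (KVM_REG_ARM64_SVE) are transferred one at a time by the
 * hand-written code in kvm_arch_get/put_registers() below.
 */
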
typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/* All system registers not listed in the following table are assumed to be
 * of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
 * often, you must add it to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};

int kvm_arm_cpreg_level(uint64_t regidx)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
        const CPRegStateLevel *l = &non_runtime_cpregs[i];
        if (l->regidx == regidx) {
            return l->level;
        }
    }

    return KVM_PUT_RUNTIME_STATE;
}

/* Callers must hold the iothread mutex lock */
static void kvm_inject_arm_sea(CPUState *c)
{
    ARMCPU *cpu = ARM_CPU(c);
    CPUARMState *env = &cpu->env;
    uint32_t esr;
    bool same_el;

    c->exception_index = EXCP_DATA_ABORT;
    env->exception.target_el = 1;

    /*
     * Set the DFSC to synchronous external abort and set FnV to not valid,
     * this will tell guest the FAR_ELx is UNKNOWN for this abort.
     */
    same_el = arm_current_el(env) == env->exception.target_el;
    esr = syn_data_abort_no_iss(same_el, 1, 0, 0, 0, 0, 0x10);

    env->exception.syndrome = esr;

    arm_cpu_do_interrupt(c);
}

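/*
 * The KVM core register IDs built below encode a register as its
 * offset into struct kvm_regs: KVM_REG_ARM_CORE_REG(x) is (in the
 * kernel headers) offsetof(struct kvm_regs, x) divided by the size of
 * a 32-bit word, combined here with the architecture and size fields.
 * For example AARCH64_CORE_REG(regs.pc) names the 64-bit PC slot.
 */
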
#define AARCH64_CORE_REG(x)      (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                                  KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                                  KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CTRL_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                                  KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

static int kvm_arch_put_fpsimd(CPUState *cs)
{
    CPUARMState *env = &ARM_CPU(cs)->env;
    struct kvm_one_reg reg;
    int i, ret;

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#if HOST_BIG_ENDIAN
        uint64_t fp_val[2] = { q[1], q[0] };
        reg.addr = (uintptr_t)fp_val;
#else
        reg.addr = (uintptr_t)q;
#endif
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    return 0;
}

/*
 * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
 * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
 * code the slice index to zero for now as it's unlikely we'll need more than
 * one slice for quite some time.
 */
static int kvm_arch_put_sve(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint64_t tmp[ARM_MAX_VQ * 2];
    uint64_t *r;
    struct kvm_one_reg reg;
    int n, ret;

    for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
        r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2);
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
        r = sve_bswap64(tmp, &env->vfp.pregs[n].p[0],
                        DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0],
                    DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    reg.addr = (uintptr_t)r;
    reg.id = KVM_REG_ARM64_SVE_FFR(0);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    return 0;
}

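/*
 * Sizing example for the SVE transfers above and below: with
 * sve_max_vq = 4 (512-bit vectors) each ZREG carries 4 * 2 = 8
 * uint64_t of live data, while each predicate register and the FFR
 * carry DIV_ROUND_UP(4 * 2, 8) = 1 uint64_t (one predicate bit per
 * vector byte). sve_bswap64() only needs to copy on big-endian
 * hosts, which is why the put path stages through the tmp buffer
 * rather than modifying env in place.
 */
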
int kvm_arch_put_registers(CPUState *cs, int level)
{
    struct kvm_one_reg reg;
    uint64_t val;
    uint32_t fpr;
    int i, ret;
    unsigned int el;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
     * AArch64 registers before pushing them out to 64-bit KVM.
     */
    if (!is_a64(env)) {
        aarch64_sync_32_to_64(env);
    }

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_save_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
    if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }
    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Saved Program State Registers
     *
     * Before we restore from the banked_spsr[] array we need to
     * ensure that any modifications to env->spsr are correctly
     * reflected in the banks.
     */
    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->banked_spsr[i] = env->spsr;
    }

    /* KVM 0-4 map to QEMU banks 1-5 */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arch_put_sve(cs);
    } else {
        ret = kvm_arch_put_fpsimd(cs);
    }
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpsr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpcr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    write_cpustate_to_list(cpu, true);

    if (!write_list_to_kvmstate(cpu, level)) {
        return -EINVAL;
    }

    /*
     * Setting VCPU events should be triggered after syncing the registers
     * to avoid overwriting potential changes made by KVM upon calling
     * KVM_SET_VCPU_EVENTS ioctl
     */
    ret = kvm_put_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}

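/*
 * The level argument above follows the usual KVM state hierarchy:
 * KVM_PUT_RUNTIME_STATE for routine synchronisation, with
 * KVM_PUT_RESET_STATE and KVM_PUT_FULL_STATE (e.g. on migration)
 * additionally flushing the registers listed in non_runtime_cpregs[],
 * as decided by kvm_arm_cpreg_level() earlier in this file.
 */
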
static int kvm_arch_get_fpsimd(CPUState *cs)
{
    CPUARMState *env = &ARM_CPU(cs)->env;
    struct kvm_one_reg reg;
    int i, ret;

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)q;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        } else {
#if HOST_BIG_ENDIAN
            uint64_t t;
            t = q[0], q[0] = q[1], q[1] = t;
#endif
        }
    }

    return 0;
}

/*
 * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
 * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
 * code the slice index to zero for now as it's unlikely we'll need more than
 * one slice for quite some time.
 */
static int kvm_arch_get_sve(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg reg;
    uint64_t *r;
    int n, ret;

    for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
        r = &env->vfp.zregs[n].d[0];
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
        sve_bswap64(r, r, cpu->sve_max_vq * 2);
    }

    for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
        r = &env->vfp.pregs[n].p[0];
        reg.addr = (uintptr_t)r;
        reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
        sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    }

    r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
    reg.addr = (uintptr_t)r;
    reg.id = KVM_REG_ARM64_SVE_FFR(0);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));

    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    struct kvm_one_reg reg;
    uint64_t val;
    unsigned int el;
    uint32_t fpr;
    int i, ret;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    env->aarch64 = ((val & PSTATE_nRW) == 0);
    if (is_a64(env)) {
        pstate_write(env, val);
    } else {
        cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_restore_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
     * incoming AArch64 regs received from 64-bit KVM.
     * We must perform this after all of the registers have been acquired from
     * the kernel.
     */
    if (!is_a64(env)) {
        aarch64_sync_64_to_32(env);
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Fetch the SPSR registers
     *
     * KVM SPSRs 0-4 map to QEMU banks 1-5
     */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->spsr = env->banked_spsr[i];
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arch_get_sve(cs);
    } else {
        ret = kvm_arch_get_fpsimd(cs);
    }
    if (ret) {
        return ret;
    }

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpsr(env, fpr);

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpcr(env, fpr);

    ret = kvm_get_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    /* TODO: other registers */
    return ret;
}

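/*
 * SIGBUS handling for guest memory errors. BUS_MCEERR_AR ("action
 * required") is raised synchronously when a vCPU consumes poisoned
 * memory; BUS_MCEERR_AO ("action optional") is an asynchronous
 * notification that poison was detected. Only the former is reported
 * onward to the guest below, as an ACPI GHES record plus a
 * synchronous external abort.
 */
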
void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    ram_addr_t ram_addr;
    hwaddr paddr;

    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);

    if (acpi_ghes_present() && addr) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr != RAM_ADDR_INVALID &&
            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            kvm_hwpoison_page_add(ram_addr);
            /*
             * If this is a BUS_MCEERR_AR, we know we have been called
             * synchronously from the vCPU thread, so we can easily
             * synchronize the state and inject an error.
             *
             * TODO: we currently don't tell the guest at all about
             * BUS_MCEERR_AO. In that case we might either be being
             * called synchronously from the vCPU thread, or a bit
             * later from the main thread, so doing the injection of
             * the error would be more complicated.
             */
            if (code == BUS_MCEERR_AR) {
                kvm_cpu_synchronize_state(c);
                if (!acpi_ghes_record_errors(ACPI_HEST_SRC_ID_SEA, paddr)) {
                    kvm_inject_arm_sea(c);
                } else {
                    error_report("failed to record the error");
                    abort();
                }
            }
            return;
        }
        if (code == BUS_MCEERR_AO) {
            error_report("Hardware memory error at addr %p for memory used by "
                         "QEMU itself instead of guest system!", addr);
        }
    }

    if (code == BUS_MCEERR_AR) {
        error_report("Hardware memory error!");
        exit(1);
    }
}

/* C6.6.29 BRK instruction */
static const uint32_t brk_insn = 0xd4200000;

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static uint32_t brk;

    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
            brk != brk_insn ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

/* See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register
 *
 * To minimise translating between kernel and user-space the kernel
 * ABI just provides user-space with the full exception syndrome
 * register value to be decoded in QEMU.
 */

bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
    int hsr_ec = syn_get_ec(debug_exit->hsr);
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* Ensure PC is synchronised */
    kvm_cpu_synchronize_state(cs);

    switch (hsr_ec) {
    case EC_SOFTWARESTEP:
        if (cs->singlestep_enabled) {
            return true;
        } else {
            /*
             * The kernel should have suppressed the guest's ability to
             * single step at this point so something has gone wrong.
             */
            error_report("%s: guest single-step while debugging unsupported"
                         " (%"PRIx64", %"PRIx32")",
                         __func__, env->pc, debug_exit->hsr);
            return false;
        }
    case EC_AA64_BKPT:
        if (kvm_find_sw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_BREAKPOINT:
        if (find_hw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_WATCHPOINT:
    {
        CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
        if (wp) {
            cs->watchpoint_hit = wp;
            return true;
        }
        break;
    }
    default:
        error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
                     __func__, debug_exit->hsr, env->pc);
    }

    /* If we are not handling the debug exception it must belong to
     * the guest. Let's re-use the existing TCG interrupt code to set
     * everything up properly.
     */
    cs->exception_index = EXCP_BKPT;
    env->exception.syndrome = debug_exit->hsr;
    env->exception.vaddress = debug_exit->far;
    env->exception.target_el = 1;
    qemu_mutex_lock_iothread();
    arm_cpu_do_interrupt(cs);
    qemu_mutex_unlock_iothread();

    return false;
}

#define ARM64_REG_ESR_EL1 ARM64_SYS_REG(3, 0, 5, 2, 0)
#define ARM64_REG_TCR_EL1 ARM64_SYS_REG(3, 0, 2, 0, 2)

/*
 * ESR_EL1
 * ISS encoding
 * AARCH64: DFSC,   bits [5:0]
 * AARCH32:
 *      TTBCR.EAE == 0
 *          FS[4]   - DFSR[10]
 *          FS[3:0] - DFSR[3:0]
 *      TTBCR.EAE == 1
 *          FS, bits [5:0]
 */
#define ESR_DFSC(aarch64, lpae, v)         \
    (((aarch64) || (lpae)) ? ((v) & 0x3F)  \
     : (((v) >> 6) | ((v) & 0x1F)))

#define ESR_DFSC_EXTABT(aarch64, lpae) \
    ((aarch64) ? 0x10 : (lpae) ? 0x10 : 0x8)

bool kvm_arm_verify_ext_dabt_pending(CPUState *cs)
{
    uint64_t dfsr_val;

    if (!kvm_get_one_reg(cs, ARM64_REG_ESR_EL1, &dfsr_val)) {
        ARMCPU *cpu = ARM_CPU(cs);
        CPUARMState *env = &cpu->env;
        int aarch64_mode = arm_feature(env, ARM_FEATURE_AARCH64);
        int lpae = 0;

        if (!aarch64_mode) {
            uint64_t ttbcr;

            if (!kvm_get_one_reg(cs, ARM64_REG_TCR_EL1, &ttbcr)) {
                lpae = arm_feature(env, ARM_FEATURE_LPAE)
                       && (ttbcr & TTBCR_EAE);
            }
        }
        /*
         * The verification here is based on the DFSC bits
         * of the ESR_EL1 reg only
         */
        return (ESR_DFSC(aarch64_mode, lpae, dfsr_val) ==
                ESR_DFSC_EXTABT(aarch64_mode, lpae));
    }
    return false;
}