// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/guest.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/nospec.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_host.h>
#include <asm/sigcontext.h>

#include "trace.h"

#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT(halt_successful_poll),
	VCPU_STAT(halt_attempted_poll),
	VCPU_STAT(halt_poll_invalid),
	VCPU_STAT(halt_wakeup),
	VCPU_STAT(hvc_exit_stat),
	VCPU_STAT(wfe_exit_stat),
	VCPU_STAT(wfi_exit_stat),
	VCPU_STAT(mmio_exit_user),
	VCPU_STAT(mmio_exit_kernel),
	VCPU_STAT(exits),
	{ NULL }
};

static bool core_reg_offset_is_vreg(u64 off)
{
	return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
		off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
}

static u64 core_reg_offset_from_id(u64 id)
{
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}

static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
{
	int size;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
	case KVM_REG_ARM_CORE_REG(regs.sp):
	case KVM_REG_ARM_CORE_REG(regs.pc):
	case KVM_REG_ARM_CORE_REG(regs.pstate):
	case KVM_REG_ARM_CORE_REG(sp_el1):
	case KVM_REG_ARM_CORE_REG(elr_el1):
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		size = sizeof(__u64);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		size = sizeof(__uint128_t);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		size = sizeof(__u32);
		break;

	default:
		return -EINVAL;
	}

	if (!IS_ALIGNED(off, size / sizeof(__u32)))
		return -EINVAL;

	/*
	 * The KVM_REG_ARM64_SVE regs must be used instead of
	 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
	 * SVE-enabled vcpus:
	 */
	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
		return -EINVAL;

	return size;
}

static int validate_core_offset(const struct kvm_vcpu *vcpu,
				const struct kvm_one_reg *reg)
{
	u64 off = core_reg_offset_from_id(reg->id);
	int size = core_reg_size_from_offset(vcpu, off);

	if (size < 0)
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) != size)
		return -EINVAL;

	return 0;
}

static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/*
	 * Because the kvm_regs structure is a mix of 32, 64 and
	 * 128bit fields, we index it as if it was a 32bit
	 * array. Hence below, nr_regs is the number of entries, and
	 * off the index in the "array".
	 */
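	/*
	 * For example, regs.regs[1] lives at byte offset 8 within
	 * struct kvm_regs, so its core register index ("off") is 2 and
	 * a 64-bit access to it spans two consecutive __u32 slots.
	 */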
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	u32 off;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(vcpu, reg))
		return -EINVAL;

	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	__uint128_t tmp;
	void *valp = &tmp;
	u64 off;
	int err = 0;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(vcpu, reg))
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
		err = -EFAULT;
		goto out;
	}

	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
		switch (mode) {
		case PSR_AA32_MODE_USR:
			if (!system_supports_32bit_el0())
				return -EINVAL;
			break;
		case PSR_AA32_MODE_FIQ:
		case PSR_AA32_MODE_IRQ:
		case PSR_AA32_MODE_SVC:
		case PSR_AA32_MODE_ABT:
		case PSR_AA32_MODE_UND:
			if (!vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		case PSR_MODE_EL0t:
		case PSR_MODE_EL1t:
		case PSR_MODE_EL1h:
			if (vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
out:
	return err;
}

#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))

static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
		return -EINVAL;

	memset(vqs, 0, sizeof(vqs));

	max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (sve_vq_available(vq))
			vqs[vq_word(vq)] |= vq_mask(vq);

	if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
		return -EFAULT;

	return 0;
}

static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM; /* too late! */

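	/*
	 * Finalization is what allocates sve_state (see
	 * kvm_vcpu_finalize_sve()), so nothing should be allocated yet.
	 */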
	if (WARN_ON(vcpu->arch.sve_state))
		return -EINVAL;

	if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
		return -EFAULT;

	max_vq = 0;
	for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
		if (vq_present(vqs, vq))
			max_vq = vq;

	if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
		return -EINVAL;

	/*
	 * Vector lengths supported by the host can't currently be
	 * hidden from the guest individually: instead we can only set a
	 * maximum via ZCR_EL2.LEN. So, make sure the available vector
	 * lengths match the set requested exactly up to the requested
	 * maximum:
	 */
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (vq_present(vqs, vq) != sve_vq_available(vq))
			return -EINVAL;

	/* Can't run with no vector lengths at all: */
	if (max_vq < SVE_VQ_MIN)
		return -EINVAL;

	/* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
	vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);

	return 0;
}

#define SVE_REG_SLICE_SHIFT	0
#define SVE_REG_SLICE_BITS	5
#define SVE_REG_ID_SHIFT	(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
#define SVE_REG_ID_BITS		5

#define SVE_REG_SLICE_MASK					\
	GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1,	\
		SVE_REG_SLICE_SHIFT)
#define SVE_REG_ID_MASK							\
	GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)

#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)

#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))

/*
 * Number of register slices required to cover each whole SVE register.
 * NOTE: Only the first slice ever exists, for now.
 * If you are tempted to modify this, you must also rework sve_reg_to_region()
 * to match:
 */
#define vcpu_sve_slices(vcpu) 1

/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
struct sve_state_reg_region {
	unsigned int koffset;	/* offset into sve_state in kernel memory */
	unsigned int klen;	/* length in kernel memory */
	unsigned int upad;	/* extra trailing padding in user memory */
};

/*
 * Validate SVE register ID and get sanitised bounds for user/kernel SVE
 * register copy
 */
static int sve_reg_to_region(struct sve_state_reg_region *region,
			     struct kvm_vcpu *vcpu,
			     const struct kvm_one_reg *reg)
{
	/* reg ID ranges for Z- registers */
	const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
	const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
						       SVE_NUM_SLICES - 1);

	/* reg ID ranges for P- registers and FFR (which are contiguous) */
	const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
	const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);

	unsigned int vq;
	unsigned int reg_num;

	unsigned int reqoffset, reqlen; /* User-requested offset and length */
	unsigned int maxlen; /* Maximum permitted length */

	size_t sve_state_size;

	const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
						       SVE_NUM_SLICES - 1);

	/* Verify that the P-regs and FFR really do have contiguous IDs: */
	BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);

	/* Verify that we match the UAPI header: */
	BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);

	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;

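	/*
	 * vcpu->arch.sve_state uses the same layout as the SVE register
	 * block of the signal frame, which is why the offsets below are
	 * derived from the SVE_SIG_*REG_OFFSET() macros, rebased so that
	 * SVE_SIG_REGS_OFFSET becomes offset 0.
	 */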
	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_ZREG_SIZE;
		maxlen = SVE_SIG_ZREG_SIZE(vq);
	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

		reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_PREG_SIZE;
		maxlen = SVE_SIG_PREG_SIZE(vq);
	} else {
		return -EINVAL;
	}

	sve_state_size = vcpu_sve_state_size(vcpu);
	if (WARN_ON(!sve_state_size))
		return -EINVAL;

	region->koffset = array_index_nospec(reqoffset, sve_state_size);
	region->klen = min(maxlen, reqlen);
	region->upad = reqlen - region->klen;

	return 0;
}

static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	char __user *uptr = (char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return get_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
			 region.klen) ||
	    clear_user(uptr + region.klen, region.upad))
		return -EFAULT;

	return 0;
}

static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	const char __user *uptr = (const char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return set_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
			   region.klen))
		return -EFAULT;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	unsigned int i;
	int n = 0;

	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
		u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
		int size = core_reg_size_from_offset(vcpu, i);

		if (size < 0)
			continue;

		switch (size) {
		case sizeof(__u32):
			reg |= KVM_REG_SIZE_U32;
			break;

		case sizeof(__u64):
			reg |= KVM_REG_SIZE_U64;
			break;

		case sizeof(__uint128_t):
			reg |= KVM_REG_SIZE_U128;
			break;

		default:
			WARN_ON(1);
			continue;
		}

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
{
	return copy_core_reg_indices(vcpu, NULL);
}

/**
 * ARM64 versions of the TIMER registers, always available on arm64
 */

#define NUM_TIMER_REGS 3

static bool is_timer_reg(u64 index)
{
	switch (index) {
	case KVM_REG_ARM_TIMER_CTL:
	case KVM_REG_ARM_TIMER_CNT:
	case KVM_REG_ARM_TIMER_CVAL:
		return true;
	}
	return false;
}

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
		return -EFAULT;

	return 0;
}

static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int ret;

	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
	if (ret != 0)
		return -EFAULT;

	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	val = kvm_arm_timer_get_reg(vcpu, reg->id);
	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}

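/*
 * SVE registers exposed via KVM_GET_ONE_REG/KVM_SET_ONE_REG: the
 * KVM_REG_ARM64_SVE_VLS pseudo-register plus, once the vcpu has been
 * finalized, one slice of each Z-, P- and FFR register.
 */
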
static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
		+ 1; /* KVM_REG_ARM64_SVE_VLS */
}

static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);
	u64 reg;
	unsigned int i, n;
	int num_regs = 0;

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	/*
	 * Enumerate this first, so that userspace can save/restore in
	 * the order reported by KVM_GET_REG_LIST:
	 */
	reg = KVM_REG_ARM64_SVE_VLS;
	if (put_user(reg, uindices++))
		return -EFAULT;
	++num_regs;

	for (i = 0; i < slices; i++) {
		for (n = 0; n < SVE_NUM_ZREGS; n++) {
			reg = KVM_REG_ARM64_SVE_ZREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		for (n = 0; n < SVE_NUM_PREGS; n++) {
			reg = KVM_REG_ARM64_SVE_PREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		reg = KVM_REG_ARM64_SVE_FFR(i);
		if (put_user(reg, uindices++))
			return -EFAULT;
		num_regs++;
	}

	return num_regs;
}

/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_core_regs(vcpu);
	res += num_sve_regs(vcpu);
	res += kvm_arm_num_sys_reg_descs(vcpu);
	res += kvm_arm_get_fw_num_regs(vcpu);
	res += NUM_TIMER_REGS;

	return res;
}

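/*
 * NOTE: the total returned by kvm_arm_num_regs() above must match the
 * set of indices written out by kvm_arm_copy_reg_indices() below, since
 * KVM_GET_REG_LIST sizes the user buffer with the former and fills it
 * with the latter.
 */
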
/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 *
 * We do core registers right here, then we append system regs.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int ret;

	ret = copy_core_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sve_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += kvm_arm_get_fw_num_regs(vcpu);

	ret = copy_timer_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += NUM_TIMER_REGS;

	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}

int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return get_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_get_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return get_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return get_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_get_reg(vcpu, reg);
}

int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return set_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_set_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return set_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return set_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_set_reg(vcpu, reg);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

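/*
 * KVM_GET_VCPU_EVENTS / KVM_SET_VCPU_EVENTS: report and inject SError
 * (and, on the set side, external data abort) state for this vcpu.
 */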
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
	events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);

	if (events->exception.serror_pending && events->exception.serror_has_esr)
		events->exception.serror_esr = vcpu_get_vsesr(vcpu);

	/*
	 * We never return a pending ext_dabt here because we deliver it to
	 * the virtual CPU directly when setting the event and it's no longer
	 * 'pending' at this point.
	 */

	return 0;
}

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	bool serror_pending = events->exception.serror_pending;
	bool has_esr = events->exception.serror_has_esr;
	bool ext_dabt_pending = events->exception.ext_dabt_pending;

	if (serror_pending && has_esr) {
		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
			return -EINVAL;

		if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
			kvm_set_sei_esr(vcpu, events->exception.serror_esr);
		else
			return -EINVAL;
	} else if (serror_pending) {
		kvm_inject_vabt(vcpu);
	}

	if (ext_dabt_pending)
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));

	return 0;
}

int __attribute_const__ kvm_target_cpu(void)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_ARM:
		switch (part_number) {
		case ARM_CPU_PART_AEM_V8:
			return KVM_ARM_TARGET_AEM_V8;
		case ARM_CPU_PART_FOUNDATION:
			return KVM_ARM_TARGET_FOUNDATION_V8;
		case ARM_CPU_PART_CORTEX_A53:
			return KVM_ARM_TARGET_CORTEX_A53;
		case ARM_CPU_PART_CORTEX_A57:
			return KVM_ARM_TARGET_CORTEX_A57;
		}
		break;
	case ARM_CPU_IMP_APM:
		switch (part_number) {
		case APM_CPU_PART_POTENZA:
			return KVM_ARM_TARGET_XGENE_POTENZA;
		}
		break;
	}

	/* Return a default generic target */
	return KVM_ARM_TARGET_GENERIC_V8;
}

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
	int target = kvm_target_cpu();

	if (target < 0)
		return -ENODEV;

	memset(init, 0, sizeof(*init));

	/*
	 * For now, we don't return any features.
	 * In future, we might use features to return target
	 * specific features available for the preferred
	 * target type.
	 */
	init->target = (__u32)target;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE |    \
			    KVM_GUESTDBG_USE_SW_BP | \
			    KVM_GUESTDBG_USE_HW | \
			    KVM_GUESTDBG_SINGLESTEP)

/**
 * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
 * @vcpu: the vCPU pointer
 * @dbg: the ioctl data buffer
 *
 * This sets up and enables the VM for guest debugging. Userspace
 * passes in a control flag to enable different debug types and
 * potentially other architecture specific information in the rest of
 * the structure.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int ret = 0;

	trace_kvm_set_guest_debug(vcpu, dbg->control);

	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
		ret = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;

		/* Hardware assisted Break and Watch points */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			vcpu->arch.external_debug_state = dbg->arch;
		}

	} else {
		/* If not enabled clear all flags */
		vcpu->guest_debug = 0;
	}

out:
	return ret;
}

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_set_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_get_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_has_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}