// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/guest.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/nospec.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/sigcontext.h>

#include "trace.h"

#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT(halt_successful_poll),
	VCPU_STAT(halt_attempted_poll),
	VCPU_STAT(halt_poll_invalid),
	VCPU_STAT(halt_wakeup),
	VCPU_STAT(hvc_exit_stat),
	VCPU_STAT(wfe_exit_stat),
	VCPU_STAT(wfi_exit_stat),
	VCPU_STAT(mmio_exit_user),
	VCPU_STAT(mmio_exit_kernel),
	VCPU_STAT(exits),
	{ NULL }
};

static bool core_reg_offset_is_vreg(u64 off)
{
	return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
		off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
}

static u64 core_reg_offset_from_id(u64 id)
{
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}

static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
{
	int size;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
	case KVM_REG_ARM_CORE_REG(regs.sp):
	case KVM_REG_ARM_CORE_REG(regs.pc):
	case KVM_REG_ARM_CORE_REG(regs.pstate):
	case KVM_REG_ARM_CORE_REG(sp_el1):
	case KVM_REG_ARM_CORE_REG(elr_el1):
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		size = sizeof(__u64);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		size = sizeof(__uint128_t);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		size = sizeof(__u32);
		break;

	default:
		return -EINVAL;
	}

	if (!IS_ALIGNED(off, size / sizeof(__u32)))
		return -EINVAL;

	/*
	 * The KVM_REG_ARM64_SVE regs must be used instead of
	 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
	 * SVE-enabled vcpus:
	 */
	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
		return -EINVAL;

	return size;
}

static int validate_core_offset(const struct kvm_vcpu *vcpu,
				const struct kvm_one_reg *reg)
{
	u64 off = core_reg_offset_from_id(reg->id);
	int size = core_reg_size_from_offset(vcpu, off);

	if (size < 0)
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) != size)
		return -EINVAL;

	return 0;
}
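
/*
 * For reference: a core register is identified to userspace as
 * KVM_REG_ARM64 | KVM_REG_SIZE_* | KVM_REG_ARM_CORE | <offset into
 * struct kvm_regs in 32-bit words>, which is what KVM_REG_ARM_CORE_REG()
 * produces.  An illustrative userspace read of X0 (sketch only: error
 * handling omitted, vcpu_fd is a placeholder for the vcpu file descriptor)
 * therefore looks roughly like:
 *
 *	__u64 x0;
 *	struct kvm_one_reg r = {
 *		.id   = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
 *			KVM_REG_ARM_CORE_REG(regs.regs[0]),
 *		.addr = (__u64)&x0,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &r);
 *
 * get_core_reg() and set_core_reg() below implement the kernel side of
 * this interface.
 */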

static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/*
	 * Because the kvm_regs structure is a mix of 32, 64 and
	 * 128bit fields, we index it as if it were a 32bit
	 * array. Hence below, nr_regs is the number of entries, and
	 * off the index in the "array".
	 */
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	u32 off;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(vcpu, reg))
		return -EINVAL;

	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	__uint128_t tmp;
	void *valp = &tmp;
	u64 off;
	int err = 0;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(vcpu, reg))
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
		err = -EFAULT;
		goto out;
	}

	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
		switch (mode) {
		case PSR_AA32_MODE_USR:
			if (!system_supports_32bit_el0())
				return -EINVAL;
			break;
		case PSR_AA32_MODE_FIQ:
		case PSR_AA32_MODE_IRQ:
		case PSR_AA32_MODE_SVC:
		case PSR_AA32_MODE_ABT:
		case PSR_AA32_MODE_UND:
			if (!vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		case PSR_MODE_EL0t:
		case PSR_MODE_EL1t:
		case PSR_MODE_EL1h:
			if (vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
out:
	return err;
}
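
/*
 * KVM_REG_ARM64_SVE_VLS is presented to userspace as a bitmap of
 * KVM_ARM64_SVE_VLS_WORDS 64-bit words: bit ((vq - SVE_VQ_MIN) % 64) of
 * word ((vq - SVE_VQ_MIN) / 64) is set if vector quantum vq (i.e. a vector
 * length of vq * 16 bytes) is available to the vcpu.  The helpers below
 * encode exactly that layout.
 */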

#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))

static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
		return -EINVAL;

	memset(vqs, 0, sizeof(vqs));

	max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (sve_vq_available(vq))
			vqs[vq_word(vq)] |= vq_mask(vq);

	if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
		return -EFAULT;

	return 0;
}

static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM; /* too late! */

	if (WARN_ON(vcpu->arch.sve_state))
		return -EINVAL;

	if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
		return -EFAULT;

	max_vq = 0;
	for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
		if (vq_present(vqs, vq))
			max_vq = vq;

	if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
		return -EINVAL;

	/*
	 * Vector lengths supported by the host can't currently be
	 * hidden from the guest individually: instead we can only set a
	 * maximum via ZCR_EL2.LEN. So, make sure the available vector
	 * lengths match the set requested exactly up to the requested
	 * maximum:
	 */
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (vq_present(vqs, vq) != sve_vq_available(vq))
			return -EINVAL;

	/* Can't run with no vector lengths at all: */
	if (max_vq < SVE_VQ_MIN)
		return -EINVAL;

	/* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
	vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);

	return 0;
}

#define SVE_REG_SLICE_SHIFT	0
#define SVE_REG_SLICE_BITS	5
#define SVE_REG_ID_SHIFT	(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
#define SVE_REG_ID_BITS		5

#define SVE_REG_SLICE_MASK					\
	GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1,	\
		SVE_REG_SLICE_SHIFT)
#define SVE_REG_ID_MASK							\
	GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)

#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)

#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))
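
/*
 * Per the bitfields above, the low bits of an SVE register ID encode the
 * slice index in bits [4:0] and the register number in bits [9:5].  Only
 * slice 0 currently exists (see vcpu_sve_slices() below); a non-zero slice
 * index yields -ENOENT in sve_reg_to_region().
 */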

/*
 * Number of register slices required to cover each whole SVE register.
 * NOTE: Only the first slice ever exists, for now.
 * If you are tempted to modify this, you must also rework sve_reg_to_region()
 * to match:
 */
#define vcpu_sve_slices(vcpu) 1

/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
struct sve_state_reg_region {
	unsigned int koffset;	/* offset into sve_state in kernel memory */
	unsigned int klen;	/* length in kernel memory */
	unsigned int upad;	/* extra trailing padding in user memory */
};

/*
 * Validate SVE register ID and get sanitised bounds for user/kernel SVE
 * register copy
 */
static int sve_reg_to_region(struct sve_state_reg_region *region,
			     struct kvm_vcpu *vcpu,
			     const struct kvm_one_reg *reg)
{
	/* reg ID ranges for Z- registers */
	const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
	const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
						       SVE_NUM_SLICES - 1);

	/* reg ID ranges for P- registers and FFR (which are contiguous) */
	const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
	const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);

	unsigned int vq;
	unsigned int reg_num;

	unsigned int reqoffset, reqlen; /* User-requested offset and length */
	unsigned int maxlen; /* Maximum permitted length */

	size_t sve_state_size;

	const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
						       SVE_NUM_SLICES - 1);

	/* Verify that the P-regs and FFR really do have contiguous IDs: */
	BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);

	/* Verify that we match the UAPI header: */
	BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);

	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;

	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_ZREG_SIZE;
		maxlen = SVE_SIG_ZREG_SIZE(vq);
	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

		reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_PREG_SIZE;
		maxlen = SVE_SIG_PREG_SIZE(vq);
	} else {
		return -EINVAL;
	}

	sve_state_size = vcpu_sve_state_size(vcpu);
	if (WARN_ON(!sve_state_size))
		return -EINVAL;

	region->koffset = array_index_nospec(reqoffset, sve_state_size);
	region->klen = min(maxlen, reqlen);
	region->upad = reqlen - region->klen;

	return 0;
}

static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	char __user *uptr = (char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return get_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
			 region.klen) ||
	    clear_user(uptr + region.klen, region.upad))
		return -EFAULT;

	return 0;
}
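
/*
 * Note on sizing: userspace always transfers a fixed, maximum-sized block
 * per Z- or P-register (KVM_SVE_ZREG_SIZE / KVM_SVE_PREG_SIZE), regardless
 * of the vcpu's configured vector length.  On reads, the bytes beyond the
 * vcpu's actual register contents (region.upad) are zeroed; on writes, only
 * the first region.klen bytes are consumed and the remainder is ignored.
 */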

static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	const char __user *uptr = (const char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return set_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
			   region.klen))
		return -EFAULT;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}
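
/*
 * copy_core_reg_indices() below serves double duty: with uindices == NULL
 * it only counts the core registers (see num_core_regs()); otherwise it
 * also writes one register ID per valid 32-bit offset into the user buffer,
 * skipping holes in struct kvm_regs and, on SVE-enabled vcpus, the FPSIMD
 * V-register range (for which core_reg_size_from_offset() returns an error).
 */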

static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	unsigned int i;
	int n = 0;

	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
		u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
		int size = core_reg_size_from_offset(vcpu, i);

		if (size < 0)
			continue;

		switch (size) {
		case sizeof(__u32):
			reg |= KVM_REG_SIZE_U32;
			break;

		case sizeof(__u64):
			reg |= KVM_REG_SIZE_U64;
			break;

		case sizeof(__uint128_t):
			reg |= KVM_REG_SIZE_U128;
			break;

		default:
			WARN_ON(1);
			continue;
		}

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
{
	return copy_core_reg_indices(vcpu, NULL);
}

/**
 * ARM64 versions of the TIMER registers, always available on arm64
 */

#define NUM_TIMER_REGS 3

static bool is_timer_reg(u64 index)
{
	switch (index) {
	case KVM_REG_ARM_TIMER_CTL:
	case KVM_REG_ARM_TIMER_CNT:
	case KVM_REG_ARM_TIMER_CVAL:
		return true;
	}
	return false;
}

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
		return -EFAULT;

	return 0;
}

static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int ret;

	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
	if (ret != 0)
		return -EFAULT;

	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	val = kvm_arm_timer_get_reg(vcpu, reg->id);
	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}

static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
		+ 1; /* KVM_REG_ARM64_SVE_VLS */
}

static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);
	u64 reg;
	unsigned int i, n;
	int num_regs = 0;

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	/*
	 * Enumerate this first, so that userspace can save/restore in
	 * the order reported by KVM_GET_REG_LIST:
	 */
	reg = KVM_REG_ARM64_SVE_VLS;
	if (put_user(reg, uindices++))
		return -EFAULT;
	++num_regs;

	for (i = 0; i < slices; i++) {
		for (n = 0; n < SVE_NUM_ZREGS; n++) {
			reg = KVM_REG_ARM64_SVE_ZREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		for (n = 0; n < SVE_NUM_PREGS; n++) {
			reg = KVM_REG_ARM64_SVE_PREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		reg = KVM_REG_ARM64_SVE_FFR(i);
		if (put_user(reg, uindices++))
			return -EFAULT;
		num_regs++;
	}

	return num_regs;
}
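
/*
 * For reference, the expected userspace flow (illustrative sketch only;
 * error handling and the E2BIG size-probing convention of KVM_GET_REG_LIST
 * are omitted) is to fetch the full index list and then save/restore each
 * register in the order reported:
 *
 *	struct kvm_reg_list *list;	... allocated with room for n entries
 *
 *	list->n = n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);
 *	for (i = 0; i < list->n; i++)
 *		... KVM_GET_ONE_REG / KVM_SET_ONE_REG on list->reg[i] ...
 *
 * kvm_arm_num_regs() and kvm_arm_copy_reg_indices() below must therefore
 * agree on both the count and the enumeration order.
 */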

/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_core_regs(vcpu);
	res += num_sve_regs(vcpu);
	res += kvm_arm_num_sys_reg_descs(vcpu);
	res += kvm_arm_get_fw_num_regs(vcpu);
	res += NUM_TIMER_REGS;

	return res;
}

/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 *
 * We do core registers first, then append the SVE, firmware and timer
 * registers, and finally the system registers.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int ret;

	ret = copy_core_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sve_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += kvm_arm_get_fw_num_regs(vcpu);

	ret = copy_timer_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += NUM_TIMER_REGS;

	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}

int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return get_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_get_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return get_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return get_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_get_reg(vcpu, reg);
}

int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return set_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_set_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return set_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return set_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_set_reg(vcpu, reg);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}
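
/*
 * KVM_GET_VCPU_EVENTS/KVM_SET_VCPU_EVENTS on arm64 deal with pending SError
 * injection (with an optional ESR when the host has the RAS extension) and,
 * on the set side only, pending external data aborts.
 */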

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
	events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);

	if (events->exception.serror_pending && events->exception.serror_has_esr)
		events->exception.serror_esr = vcpu_get_vsesr(vcpu);

	/*
	 * We never return a pending ext_dabt here because we deliver it to
	 * the virtual CPU directly when setting the event and it's no longer
	 * 'pending' at this point.
	 */

	return 0;
}

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	bool serror_pending = events->exception.serror_pending;
	bool has_esr = events->exception.serror_has_esr;
	bool ext_dabt_pending = events->exception.ext_dabt_pending;

	if (serror_pending && has_esr) {
		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
			return -EINVAL;

		if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
			kvm_set_sei_esr(vcpu, events->exception.serror_esr);
		else
			return -EINVAL;
	} else if (serror_pending) {
		kvm_inject_vabt(vcpu);
	}

	if (ext_dabt_pending)
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));

	return 0;
}

int __attribute_const__ kvm_target_cpu(void)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_ARM:
		switch (part_number) {
		case ARM_CPU_PART_AEM_V8:
			return KVM_ARM_TARGET_AEM_V8;
		case ARM_CPU_PART_FOUNDATION:
			return KVM_ARM_TARGET_FOUNDATION_V8;
		case ARM_CPU_PART_CORTEX_A53:
			return KVM_ARM_TARGET_CORTEX_A53;
		case ARM_CPU_PART_CORTEX_A57:
			return KVM_ARM_TARGET_CORTEX_A57;
		}
		break;
	case ARM_CPU_IMP_APM:
		switch (part_number) {
		case APM_CPU_PART_POTENZA:
			return KVM_ARM_TARGET_XGENE_POTENZA;
		}
		break;
	}

	/* Return a default generic target */
	return KVM_ARM_TARGET_GENERIC_V8;
}

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
	int target = kvm_target_cpu();

	if (target < 0)
		return -ENODEV;

	memset(init, 0, sizeof(*init));

	/*
	 * For now, we don't return any features.
	 * In future, we might use features to return target
	 * specific features available for the preferred
	 * target type.
	 */
	init->target = (__u32)target;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE |    \
				 KVM_GUESTDBG_USE_SW_BP | \
				 KVM_GUESTDBG_USE_HW |    \
				 KVM_GUESTDBG_SINGLESTEP)

/**
 * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
 * @vcpu: the vCPU pointer
 * @dbg: the ioctl data buffer
 *
 * This sets up and enables the VM for guest debugging. Userspace
 * passes in a control flag to enable different debug types and
 * potentially other architecture specific information in the rest of
 * the structure.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int ret = 0;

	trace_kvm_set_guest_debug(vcpu, dbg->control);

	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
		ret = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;

		/* Hardware assisted Break and Watch points */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			vcpu->arch.external_debug_state = dbg->arch;
		}

	} else {
		/* If not enabled clear all flags */
		vcpu->guest_debug = 0;
	}

out:
	return ret;
}

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_set_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_get_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_has_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}