// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/guest.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/nospec.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/sigcontext.h>

#include "trace.h"

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT("halt_successful_poll", halt_successful_poll),
	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
	VCPU_STAT("halt_wakeup", halt_wakeup),
	VCPU_STAT("hvc_exit_stat", hvc_exit_stat),
	VCPU_STAT("wfe_exit_stat", wfe_exit_stat),
	VCPU_STAT("wfi_exit_stat", wfi_exit_stat),
	VCPU_STAT("mmio_exit_user", mmio_exit_user),
	VCPU_STAT("mmio_exit_kernel", mmio_exit_kernel),
	VCPU_STAT("exits", exits),
	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
	{ NULL }
};

static bool core_reg_offset_is_vreg(u64 off)
{
	return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
		off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
}

static u64 core_reg_offset_from_id(u64 id)
{
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}

static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
{
	int size;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
	case KVM_REG_ARM_CORE_REG(regs.sp):
	case KVM_REG_ARM_CORE_REG(regs.pc):
	case KVM_REG_ARM_CORE_REG(regs.pstate):
	case KVM_REG_ARM_CORE_REG(sp_el1):
	case KVM_REG_ARM_CORE_REG(elr_el1):
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		size = sizeof(__u64);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		size = sizeof(__uint128_t);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		size = sizeof(__u32);
		break;

	default:
		return -EINVAL;
	}

	if (!IS_ALIGNED(off, size / sizeof(__u32)))
		return -EINVAL;

	/*
	 * The KVM_REG_ARM64_SVE regs must be used instead of
	 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
	 * SVE-enabled vcpus:
	 */
	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
		return -EINVAL;

	return size;
}

static int validate_core_offset(const struct kvm_vcpu *vcpu,
				const struct kvm_one_reg *reg)
{
	u64 off = core_reg_offset_from_id(reg->id);
	int size = core_reg_size_from_offset(vcpu, off);

	if (size < 0)
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) != size)
		return -EINVAL;

	return 0;
}

static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/*
	 * Because the kvm_regs structure is a mix of 32, 64 and
	 * 128bit fields, we index it as if it was a 32bit
	 * array. Hence below, nr_regs is the number of entries, and
	 * off the index in the "array".
	 */
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	u32 off;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(vcpu, reg))
		return -EINVAL;

	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	__uint128_t tmp;
	void *valp = &tmp;
	u64 off;
	int err = 0;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(vcpu, reg))
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
		err = -EFAULT;
		goto out;
	}

	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
		switch (mode) {
		case PSR_AA32_MODE_USR:
			if (!system_supports_32bit_el0())
				return -EINVAL;
			break;
		case PSR_AA32_MODE_FIQ:
		case PSR_AA32_MODE_IRQ:
		case PSR_AA32_MODE_SVC:
		case PSR_AA32_MODE_ABT:
		case PSR_AA32_MODE_UND:
			if (!vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		case PSR_MODE_EL0t:
		case PSR_MODE_EL1t:
		case PSR_MODE_EL1h:
			if (vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));

	if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
		int i;

		for (i = 0; i < 16; i++)
			*vcpu_reg32(vcpu, i) = (u32)*vcpu_reg32(vcpu, i);
	}
out:
	return err;
}
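
/*
 * Userspace sketch (illustration only, not part of this file; vcpu_fd is
 * assumed to come from KVM_CREATE_VCPU): the core registers above are
 * accessed one at a time via KVM_GET_ONE_REG/KVM_SET_ONE_REG.  The
 * register ID encodes the architecture, the access size and the offset
 * into struct kvm_regs in 32-bit units, e.g. to read X0:
 *
 *	__u64 x0;
 *	struct kvm_one_reg r = {
 *		.id	= KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
 *			  KVM_REG_ARM_CORE |
 *			  KVM_REG_ARM_CORE_REG(regs.regs[0]),
 *		.addr	= (__u64)(unsigned long)&x0,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &r))
 *		err(1, "KVM_GET_ONE_REG");
 */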

#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))

static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
		return -EINVAL;

	memset(vqs, 0, sizeof(vqs));

	max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (sve_vq_available(vq))
			vqs[vq_word(vq)] |= vq_mask(vq);

	if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
		return -EFAULT;

	return 0;
}

static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM; /* too late! */

	if (WARN_ON(vcpu->arch.sve_state))
		return -EINVAL;

	if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
		return -EFAULT;

	max_vq = 0;
	for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
		if (vq_present(vqs, vq))
			max_vq = vq;

	if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
		return -EINVAL;

	/*
	 * Vector lengths supported by the host can't currently be
	 * hidden from the guest individually: instead we can only set a
	 * maximum via ZCR_EL2.LEN.  So, make sure the available vector
	 * lengths match the set requested exactly up to the requested
	 * maximum:
	 */
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (vq_present(vqs, vq) != sve_vq_available(vq))
			return -EINVAL;

	/* Can't run with no vector lengths at all: */
	if (max_vq < SVE_VQ_MIN)
		return -EINVAL;

	/* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
	vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);

	return 0;
}
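
/*
 * Illustration (a sketch, not additional ABI documentation): the
 * KVM_REG_ARM64_SVE_VLS pseudo-register handled above is a bitmap with
 * one bit per vector quantum (VQ, i.e. per multiple of 128 bits), where
 * bit ((vq - SVE_VQ_MIN) % 64) of word ((vq - SVE_VQ_MIN) / 64) stands
 * for a vector length of vq * 128 bits.  Before finalization, userspace
 * could restrict a guest to 128-bit and 256-bit vectors (assuming the
 * host supports both) with:
 *
 *	__u64 vqs[KVM_ARM64_SVE_VLS_WORDS] = { 0 };
 *
 *	vqs[0] |= 1ULL << 0;	// vq 1: VL = 128 bits
 *	vqs[0] |= 1ULL << 1;	// vq 2: VL = 256 bits
 *
 * written via KVM_SET_ONE_REG with id KVM_REG_ARM64_SVE_VLS, before
 * calling KVM_ARM_VCPU_FINALIZE for KVM_ARM_VCPU_SVE.
 */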

#define SVE_REG_SLICE_SHIFT	0
#define SVE_REG_SLICE_BITS	5
#define SVE_REG_ID_SHIFT	(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
#define SVE_REG_ID_BITS		5

#define SVE_REG_SLICE_MASK					\
	GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1,	\
		SVE_REG_SLICE_SHIFT)
#define SVE_REG_ID_MASK							\
	GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)

#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)

#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))

/*
 * Number of register slices required to cover each whole SVE register.
 * NOTE: Only the first slice ever exists, for now.
 * If you are tempted to modify this, you must also rework sve_reg_to_region()
 * to match:
 */
#define vcpu_sve_slices(vcpu) 1

/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
struct sve_state_reg_region {
	unsigned int koffset;	/* offset into sve_state in kernel memory */
	unsigned int klen;	/* length in kernel memory */
	unsigned int upad;	/* extra trailing padding in user memory */
};

/*
 * Validate SVE register ID and get sanitised bounds for user/kernel SVE
 * register copy
 */
static int sve_reg_to_region(struct sve_state_reg_region *region,
			     struct kvm_vcpu *vcpu,
			     const struct kvm_one_reg *reg)
{
	/* reg ID ranges for Z- registers */
	const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
	const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
						       SVE_NUM_SLICES - 1);

	/* reg ID ranges for P- registers and FFR (which are contiguous) */
	const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
	const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);

	unsigned int vq;
	unsigned int reg_num;

	unsigned int reqoffset, reqlen;	/* User-requested offset and length */
	unsigned int maxlen;		/* Maximum permitted length */

	size_t sve_state_size;

	const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
							SVE_NUM_SLICES - 1);

	/* Verify that the P-regs and FFR really do have contiguous IDs: */
	BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);

	/* Verify that we match the UAPI header: */
	BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);

	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;

	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_ZREG_SIZE;
		maxlen = SVE_SIG_ZREG_SIZE(vq);
	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

		reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_PREG_SIZE;
		maxlen = SVE_SIG_PREG_SIZE(vq);
	} else {
		return -EINVAL;
	}

	sve_state_size = vcpu_sve_state_size(vcpu);
	if (WARN_ON(!sve_state_size))
		return -EINVAL;

	region->koffset = array_index_nospec(reqoffset, sve_state_size);
	region->klen = min(maxlen, reqlen);
	region->upad = reqlen - region->klen;

	return 0;
}

static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	char __user *uptr = (char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return get_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
			 region.klen) ||
	    clear_user(uptr + region.klen, region.upad))
		return -EFAULT;

	return 0;
}

static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	const char __user *uptr = (const char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return set_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
			   region.klen))
		return -EFAULT;

	return 0;
}
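
/*
 * Userspace sketch (illustration only, same assumptions as above): once
 * KVM_ARM_VCPU_FINALIZE has been called for KVM_ARM_VCPU_SVE, individual
 * SVE registers are transferred with IDs built from the UAPI helpers,
 * e.g. reading Z0 (slice 0):
 *
 *	__u8 z0[KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))];
 *	struct kvm_one_reg r = {
 *		.id	= KVM_REG_ARM64_SVE_ZREG(0, 0),
 *		.addr	= (__u64)(unsigned long)z0,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &r);
 *
 * Bytes beyond the vcpu's maximum vector length read back as zero and are
 * ignored on write, as implemented by the klen/upad split above.
 */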

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	unsigned int i;
	int n = 0;

	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
		u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
		int size = core_reg_size_from_offset(vcpu, i);

		if (size < 0)
			continue;

		switch (size) {
		case sizeof(__u32):
			reg |= KVM_REG_SIZE_U32;
			break;

		case sizeof(__u64):
			reg |= KVM_REG_SIZE_U64;
			break;

		case sizeof(__uint128_t):
			reg |= KVM_REG_SIZE_U128;
			break;

		default:
			WARN_ON(1);
			continue;
		}

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
{
	return copy_core_reg_indices(vcpu, NULL);
}

/**
 * ARM64 versions of the TIMER registers, always available on arm64
 */

#define NUM_TIMER_REGS 3

static bool is_timer_reg(u64 index)
{
	switch (index) {
	case KVM_REG_ARM_TIMER_CTL:
	case KVM_REG_ARM_TIMER_CNT:
	case KVM_REG_ARM_TIMER_CVAL:
		return true;
	}
	return false;
}

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
		return -EFAULT;

	return 0;
}

static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int ret;

	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
	if (ret != 0)
		return -EFAULT;

	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	val = kvm_arm_timer_get_reg(vcpu, reg->id);
	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}

static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
		+ 1; /* KVM_REG_ARM64_SVE_VLS */
}

static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);
	u64 reg;
	unsigned int i, n;
	int num_regs = 0;

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	/*
	 * Enumerate this first, so that userspace can save/restore in
	 * the order reported by KVM_GET_REG_LIST:
	 */
	reg = KVM_REG_ARM64_SVE_VLS;
	if (put_user(reg, uindices++))
		return -EFAULT;
	++num_regs;

	for (i = 0; i < slices; i++) {
		for (n = 0; n < SVE_NUM_ZREGS; n++) {
			reg = KVM_REG_ARM64_SVE_ZREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		for (n = 0; n < SVE_NUM_PREGS; n++) {
			reg = KVM_REG_ARM64_SVE_PREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		reg = KVM_REG_ARM64_SVE_FFR(i);
		if (put_user(reg, uindices++))
			return -EFAULT;
		num_regs++;
	}

	return num_regs;
}

/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_core_regs(vcpu);
	res += num_sve_regs(vcpu);
	res += kvm_arm_num_sys_reg_descs(vcpu);
	res += kvm_arm_get_fw_num_regs(vcpu);
	res += NUM_TIMER_REGS;

	return res;
}

/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 *
 * We do core registers right here, then we append system regs.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int ret;

	ret = copy_core_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sve_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += kvm_arm_get_fw_num_regs(vcpu);

	ret = copy_timer_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += NUM_TIMER_REGS;

	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}
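
/*
 * Userspace sketch (illustration only, same assumptions as above): the two
 * enumeration helpers above back the KVM_GET_REG_LIST ioctl.  A caller
 * typically probes the register count first and then fetches the IDs:
 *
 *	struct kvm_reg_list probe = { .n = 0 }, *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);	// fails with E2BIG but
 *							// fills in probe.n
 *	list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
 *	list->n = probe.n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);
 *
 * Each returned ID can then be fed to KVM_GET_ONE_REG/KVM_SET_ONE_REG.
 */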

int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return get_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_get_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return get_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return get_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_get_reg(vcpu, reg);
}

int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return set_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_set_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return set_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return set_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_set_reg(vcpu, reg);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
	events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);

	if (events->exception.serror_pending && events->exception.serror_has_esr)
		events->exception.serror_esr = vcpu_get_vsesr(vcpu);

	/*
	 * We never return a pending ext_dabt here because we deliver it to
	 * the virtual CPU directly when setting the event and it's no longer
	 * 'pending' at this point.
	 */

	return 0;
}

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	bool serror_pending = events->exception.serror_pending;
	bool has_esr = events->exception.serror_has_esr;
	bool ext_dabt_pending = events->exception.ext_dabt_pending;

	if (serror_pending && has_esr) {
		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
			return -EINVAL;

		if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
			kvm_set_sei_esr(vcpu, events->exception.serror_esr);
		else
			return -EINVAL;
	} else if (serror_pending) {
		kvm_inject_vabt(vcpu);
	}

	if (ext_dabt_pending)
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));

	return 0;
}
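
/*
 * Userspace sketch (illustration only, same assumptions as above): the two
 * helpers above implement KVM_GET_VCPU_EVENTS/KVM_SET_VCPU_EVENTS.  For
 * example, injecting an SError without a specified ESR might look like:
 *
 *	struct kvm_vcpu_events ev = { 0 };
 *
 *	ev.exception.serror_pending = 1;
 *	ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &ev);
 */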

int __attribute_const__ kvm_target_cpu(void)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_ARM:
		switch (part_number) {
		case ARM_CPU_PART_AEM_V8:
			return KVM_ARM_TARGET_AEM_V8;
		case ARM_CPU_PART_FOUNDATION:
			return KVM_ARM_TARGET_FOUNDATION_V8;
		case ARM_CPU_PART_CORTEX_A53:
			return KVM_ARM_TARGET_CORTEX_A53;
		case ARM_CPU_PART_CORTEX_A57:
			return KVM_ARM_TARGET_CORTEX_A57;
		}
		break;
	case ARM_CPU_IMP_APM:
		switch (part_number) {
		case APM_CPU_PART_POTENZA:
			return KVM_ARM_TARGET_XGENE_POTENZA;
		}
		break;
	}

	/* Return a default generic target */
	return KVM_ARM_TARGET_GENERIC_V8;
}

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
	int target = kvm_target_cpu();

	if (target < 0)
		return -ENODEV;

	memset(init, 0, sizeof(*init));

	/*
	 * For now, we don't return any features.
	 * In future, we might use features to return target
	 * specific features available for the preferred
	 * target type.
	 */
	init->target = (__u32)target;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE |    \
			    KVM_GUESTDBG_USE_SW_BP | \
			    KVM_GUESTDBG_USE_HW | \
			    KVM_GUESTDBG_SINGLESTEP)

/**
 * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
 * @vcpu: the vCPU pointer
 * @dbg: the ioctl data buffer
 *
 * This sets up and enables the VM for guest debugging. Userspace
 * passes in a control flag to enable different debug types and
 * potentially other architecture specific information in the rest of
 * the structure.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int ret = 0;

	trace_kvm_set_guest_debug(vcpu, dbg->control);

	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
		ret = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;

		/* Hardware assisted Break and Watch points */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			vcpu->arch.external_debug_state = dbg->arch;
		}

	} else {
		/* If not enabled clear all flags */
		vcpu->guest_debug = 0;
	}

out:
	return ret;
}
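
/*
 * Userspace sketch (illustration only, same assumptions as above):
 * single-stepping a vcpu could be requested with:
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 *
 * Clearing KVM_GUESTDBG_ENABLE on a later call turns debugging back off.
 */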

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_set_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_get_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_has_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
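
/*
 * Userspace sketch (illustration only, same assumptions as above): these
 * per-vcpu attribute handlers are reached through KVM_SET_DEVICE_ATTR,
 * KVM_GET_DEVICE_ATTR and KVM_HAS_DEVICE_ATTR on the vcpu fd.  Checking
 * whether stolen-time accounting is available, for instance, might look
 * like:
 *
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_ARM_VCPU_PVTIME_CTRL,
 *		.attr	= KVM_ARM_VCPU_PVTIME_IPA,
 *	};
 *
 *	int supported = (ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0);
 *
 * with KVM_SET_DEVICE_ATTR used afterwards to program the stolen-time IPA.
 */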