// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/hwcap.h>

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
        KVM_GENERIC_VCPU_STATS(),
        STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
        STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
        STATS_DESC_COUNTER(VCPU, mmio_exit_user),
        STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
        STATS_DESC_COUNTER(VCPU, exits)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
        .name_size = KVM_STATS_NAME_SIZE,
        .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
        .id_offset = sizeof(struct kvm_stats_header),
        .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
        .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
                       sizeof(kvm_vcpu_stats_desc),
};
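/*
 * ISA extensions that a VCPU may use. The base extensions (A, C, I, M)
 * can never be disabled by user space, while the D and F floating-point
 * extensions may be disabled through the ISA config register.
 */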
#define KVM_RISCV_ISA_DISABLE_ALLOWED   (riscv_isa_extension_mask(d) | \
                                         riscv_isa_extension_mask(f))

#define KVM_RISCV_ISA_DISABLE_NOT_ALLOWED       (riscv_isa_extension_mask(a) | \
                                                 riscv_isa_extension_mask(c) | \
                                                 riscv_isa_extension_mask(i) | \
                                                 riscv_isa_extension_mask(m))

#define KVM_RISCV_ISA_ALLOWED   (KVM_RISCV_ISA_DISABLE_ALLOWED | \
                                 KVM_RISCV_ISA_DISABLE_NOT_ALLOWED)

static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
        struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
        struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
        struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
        bool loaded;

        /*
         * Preemption must be disabled here because this races with
         * kvm_sched_out()/kvm_sched_in() (called from preempt notifiers),
         * which also call vcpu_load()/vcpu_put().
         */
        get_cpu();
        loaded = (vcpu->cpu != -1);
        if (loaded)
                kvm_arch_vcpu_put(vcpu);

        memcpy(csr, reset_csr, sizeof(*csr));

        memcpy(cntx, reset_cntx, sizeof(*cntx));

        kvm_riscv_vcpu_fp_reset(vcpu);

        kvm_riscv_vcpu_timer_reset(vcpu);

        WRITE_ONCE(vcpu->arch.irqs_pending, 0);
        WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);

        /* Reset the guest CSRs for the VCPU hotplug use case */
        if (loaded)
                kvm_arch_vcpu_load(vcpu, smp_processor_id());
        put_cpu();
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
        return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
        struct kvm_cpu_context *cntx;
        struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;

        /* Mark this VCPU never ran */
        vcpu->arch.ran_atleast_once = false;
        vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;

        /* Setup ISA features available to VCPU */
        vcpu->arch.isa = riscv_isa_extension_base(NULL) & KVM_RISCV_ISA_ALLOWED;

        /* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
        cntx = &vcpu->arch.guest_reset_context;
        cntx->sstatus = SR_SPP | SR_SPIE;
        cntx->hstatus = 0;
        cntx->hstatus |= HSTATUS_VTW;
        cntx->hstatus |= HSTATUS_SPVP;
        cntx->hstatus |= HSTATUS_SPV;

        /* By default, make CY, TM, and IR counters accessible in VU mode */
        reset_csr->scounteren = 0x7;

        /* Setup VCPU timer */
        kvm_riscv_vcpu_timer_init(vcpu);

        /* Reset VCPU */
        kvm_riscv_reset_vcpu(vcpu);

        return 0;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        /*
         * The VCPU with id 0 is the designated boot CPU.
         * Keep all VCPUs with a non-zero id in the powered-off state so
         * that they can be brought up using the SBI HSM extension.
         */
        if (vcpu->vcpu_idx != 0)
                kvm_riscv_vcpu_power_off(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        /* Cleanup VCPU timer */
        kvm_riscv_vcpu_timer_deinit(vcpu);

        /* Free unused pages pre-allocated for Stage2 page table mappings */
        kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER);
}

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
                !vcpu->arch.power_off && !vcpu->arch.pause);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
        return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}
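/*
 * Accessors for the KVM_REG_RISCV_CONFIG register class (currently only
 * the ISA config register), used by the KVM_GET_ONE_REG and
 * KVM_SET_ONE_REG ioctls.
 */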
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
                                         const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CONFIG);
        unsigned long reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        switch (reg_num) {
        case KVM_REG_RISCV_CONFIG_REG(isa):
                reg_val = vcpu->arch.isa;
                break;
        default:
                return -EINVAL;
        }

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
                                         const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CONFIG);
        unsigned long reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        switch (reg_num) {
        case KVM_REG_RISCV_CONFIG_REG(isa):
                if (!vcpu->arch.ran_atleast_once) {
                        /* Ignore the disable request for these extensions */
                        vcpu->arch.isa = reg_val | KVM_RISCV_ISA_DISABLE_NOT_ALLOWED;
                        vcpu->arch.isa &= riscv_isa_extension_base(NULL);
                        vcpu->arch.isa &= KVM_RISCV_ISA_ALLOWED;
                        kvm_riscv_vcpu_fp_reset(vcpu);
                } else {
                        return -EOPNOTSUPP;
                }
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
                                       const struct kvm_one_reg *reg)
{
        struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CORE);
        unsigned long reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;
        if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
                return -EINVAL;

        if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
                reg_val = cntx->sepc;
        else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
                 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
                reg_val = ((unsigned long *)cntx)[reg_num];
        else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
                reg_val = (cntx->sstatus & SR_SPP) ?
                                KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
        else
                return -EINVAL;

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
                                       const struct kvm_one_reg *reg)
{
        struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CORE);
        unsigned long reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;
        if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
                return -EINVAL;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
                cntx->sepc = reg_val;
        else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
                 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
                ((unsigned long *)cntx)[reg_num] = reg_val;
        else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
                if (reg_val == KVM_RISCV_MODE_S)
                        cntx->sstatus |= SR_SPP;
                else
                        cntx->sstatus &= ~SR_SPP;
        } else
                return -EINVAL;

        return 0;
}
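/*
 * Accessors for the KVM_REG_RISCV_CSR register class. The sip register is
 * kept internally in HVIP layout, so its value is converted on access.
 */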
static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
                                      const struct kvm_one_reg *reg)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CSR);
        unsigned long reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;
        if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
                return -EINVAL;

        if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
                kvm_riscv_vcpu_flush_interrupts(vcpu);
                reg_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
        } else
                reg_val = ((unsigned long *)csr)[reg_num];

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
                                      const struct kvm_one_reg *reg)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CSR);
        unsigned long reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;
        if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
                return -EINVAL;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
                reg_val &= VSIP_VALID_MASK;
                reg_val <<= VSIP_TO_HVIP_SHIFT;
        }

        ((unsigned long *)csr)[reg_num] = reg_val;

        if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
                WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);

        return 0;
}
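/*
 * Dispatch a ONE_REG access to the handler for its register class based
 * on the type field encoded in the register ID.
 */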
static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
                                  const struct kvm_one_reg *reg)
{
        if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
                return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
        else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
                return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
        else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
                return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
        else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
                return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
        else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
                return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
                                                 KVM_REG_RISCV_FP_F);
        else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
                return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
                                                 KVM_REG_RISCV_FP_D);

        return -EINVAL;
}

static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
                                  const struct kvm_one_reg *reg)
{
        if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
                return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
        else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
                return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
        else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
                return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
        else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
                return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
        else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
                return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
                                                 KVM_REG_RISCV_FP_F);
        else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
                return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
                                                 KVM_REG_RISCV_FP_D);

        return -EINVAL;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
                               unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;

        if (ioctl == KVM_INTERRUPT) {
                struct kvm_interrupt irq;

                if (copy_from_user(&irq, argp, sizeof(irq)))
                        return -EFAULT;

                if (irq.irq == KVM_INTERRUPT_SET)
                        return kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
                else
                        return kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
        }

        return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r = -EINVAL;

        switch (ioctl) {
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;

                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        break;

                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_riscv_vcpu_set_reg(vcpu, &reg);
                else
                        r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
                break;
        }
        default:
                break;
        }

        return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        return -EINVAL;
}
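/*
 * Fold interrupts that were set or cleared asynchronously (tracked in
 * irqs_pending and irqs_pending_mask) into the shadow HVIP value.
 */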
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
        unsigned long mask, val;

        if (READ_ONCE(vcpu->arch.irqs_pending_mask)) {
                mask = xchg_acquire(&vcpu->arch.irqs_pending_mask, 0);
                val = READ_ONCE(vcpu->arch.irqs_pending) & mask;

                csr->hvip &= ~mask;
                csr->hvip |= val;
        }
}

void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
{
        unsigned long hvip;
        struct kvm_vcpu_arch *v = &vcpu->arch;
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

        /* Read current HVIP and VSIE CSRs */
        csr->vsie = csr_read(CSR_VSIE);

        /* Sync up the HVIP.VSSIP bit changes done by the Guest */
        hvip = csr_read(CSR_HVIP);
        if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
                if (hvip & (1UL << IRQ_VS_SOFT)) {
                        if (!test_and_set_bit(IRQ_VS_SOFT,
                                              &v->irqs_pending_mask))
                                set_bit(IRQ_VS_SOFT, &v->irqs_pending);
                } else {
                        if (!test_and_set_bit(IRQ_VS_SOFT,
                                              &v->irqs_pending_mask))
                                clear_bit(IRQ_VS_SOFT, &v->irqs_pending);
                }
        }
}

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
        if (irq != IRQ_VS_SOFT &&
            irq != IRQ_VS_TIMER &&
            irq != IRQ_VS_EXT)
                return -EINVAL;

        set_bit(irq, &vcpu->arch.irqs_pending);
        smp_mb__before_atomic();
        set_bit(irq, &vcpu->arch.irqs_pending_mask);

        kvm_vcpu_kick(vcpu);

        return 0;
}

int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
        if (irq != IRQ_VS_SOFT &&
            irq != IRQ_VS_TIMER &&
            irq != IRQ_VS_EXT)
                return -EINVAL;

        clear_bit(irq, &vcpu->arch.irqs_pending);
        smp_mb__before_atomic();
        set_bit(irq, &vcpu->arch.irqs_pending_mask);

        return 0;
}

bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask)
{
        unsigned long ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
                            << VSIP_TO_HVIP_SHIFT) & mask;

        return (READ_ONCE(vcpu->arch.irqs_pending) & ie) ? true : false;
}

void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
        vcpu->arch.power_off = true;
        kvm_make_request(KVM_REQ_SLEEP, vcpu);
        kvm_vcpu_kick(vcpu);
}

void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
{
        vcpu->arch.power_off = false;
        kvm_vcpu_wake_up(vcpu);
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        if (vcpu->arch.power_off)
                mp_state->mp_state = KVM_MP_STATE_STOPPED;
        else
                mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

        return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        int ret = 0;

        switch (mp_state->mp_state) {
        case KVM_MP_STATE_RUNNABLE:
                vcpu->arch.power_off = false;
                break;
        case KVM_MP_STATE_STOPPED:
                kvm_riscv_vcpu_power_off(vcpu);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        /* TODO: To be implemented later. */
        return -EINVAL;
}
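/*
 * Called when this VCPU is scheduled in on a host CPU: restore the guest
 * VS-level CSRs, Stage2 page table pointer, timer, and FP state.
 */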
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

        csr_write(CSR_VSSTATUS, csr->vsstatus);
        csr_write(CSR_VSIE, csr->vsie);
        csr_write(CSR_VSTVEC, csr->vstvec);
        csr_write(CSR_VSSCRATCH, csr->vsscratch);
        csr_write(CSR_VSEPC, csr->vsepc);
        csr_write(CSR_VSCAUSE, csr->vscause);
        csr_write(CSR_VSTVAL, csr->vstval);
        csr_write(CSR_HVIP, csr->hvip);
        csr_write(CSR_VSATP, csr->vsatp);

        kvm_riscv_stage2_update_hgatp(vcpu);

        kvm_riscv_vcpu_timer_restore(vcpu);

        kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
        kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
                                        vcpu->arch.isa);

        vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

        vcpu->cpu = -1;

        kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
                                     vcpu->arch.isa);
        kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);

        csr->vsstatus = csr_read(CSR_VSSTATUS);
        csr->vsie = csr_read(CSR_VSIE);
        csr->vstvec = csr_read(CSR_VSTVEC);
        csr->vsscratch = csr_read(CSR_VSSCRATCH);
        csr->vsepc = csr_read(CSR_VSEPC);
        csr->vscause = csr_read(CSR_VSCAUSE);
        csr->vstval = csr_read(CSR_VSTVAL);
        csr->hvip = csr_read(CSR_HVIP);
        csr->vsatp = csr_read(CSR_VSATP);
}
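/*
 * Handle VCPU requests (sleep, reset, HGATP update, TLB flush) raised
 * from other contexts before re-entering the guest.
 */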
static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
{
        struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

        if (kvm_request_pending(vcpu)) {
                if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
                        rcuwait_wait_event(wait,
                                (!vcpu->arch.power_off) && (!vcpu->arch.pause),
                                TASK_INTERRUPTIBLE);

                        if (vcpu->arch.power_off || vcpu->arch.pause) {
                                /*
                                 * Awakened to handle a signal; request to
                                 * sleep again later.
                                 */
                                kvm_make_request(KVM_REQ_SLEEP, vcpu);
                        }
                }

                if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
                        kvm_riscv_reset_vcpu(vcpu);

                if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
                        kvm_riscv_stage2_update_hgatp(vcpu);

                if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                        __kvm_riscv_hfence_gvma_all();
        }
}

static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

        csr_write(CSR_HVIP, csr->hvip);
}

/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
        guest_state_enter_irqoff();
        __kvm_riscv_switch_to(&vcpu->arch);
        guest_state_exit_irqoff();
}
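/*
 * The main VCPU run loop: complete any MMIO or SBI request returned by
 * user space, then repeatedly enter the guest until an exit must be
 * forwarded to user space or a signal is pending.
 */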
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
        int ret;
        struct kvm_cpu_trap trap;
        struct kvm_run *run = vcpu->run;

        /* Mark that this VCPU ran at least once */
        vcpu->arch.ran_atleast_once = true;

        kvm_vcpu_srcu_read_lock(vcpu);

        /* Process MMIO value returned from user-space */
        if (run->exit_reason == KVM_EXIT_MMIO) {
                ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
                if (ret) {
                        kvm_vcpu_srcu_read_unlock(vcpu);
                        return ret;
                }
        }

        /* Process SBI value returned from user-space */
        if (run->exit_reason == KVM_EXIT_RISCV_SBI) {
                ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
                if (ret) {
                        kvm_vcpu_srcu_read_unlock(vcpu);
                        return ret;
                }
        }

        if (run->immediate_exit) {
                kvm_vcpu_srcu_read_unlock(vcpu);
                return -EINTR;
        }

        vcpu_load(vcpu);

        kvm_sigset_activate(vcpu);

        ret = 1;
        run->exit_reason = KVM_EXIT_UNKNOWN;
        while (ret > 0) {
                /* Check conditions before entering the guest */
                cond_resched();

                kvm_riscv_stage2_vmid_update(vcpu);

                kvm_riscv_check_vcpu_requests(vcpu);

                preempt_disable();

                local_irq_disable();

                /*
                 * Exit if we have a signal pending so that we can deliver
                 * the signal to user space.
                 */
                if (signal_pending(current)) {
                        ret = -EINTR;
                        run->exit_reason = KVM_EXIT_INTR;
                }

                /*
                 * Ensure we set mode to IN_GUEST_MODE after we disable
                 * interrupts and before the final VCPU requests check.
                 * See the comment in kvm_vcpu_exiting_guest_mode() and
                 * Documentation/virt/kvm/vcpu-requests.rst
                 */
                vcpu->mode = IN_GUEST_MODE;

                kvm_vcpu_srcu_read_unlock(vcpu);
                smp_mb__after_srcu_read_unlock();

                /*
                 * We might have got VCPU interrupts updated asynchronously
                 * so update it in HW.
                 */
                kvm_riscv_vcpu_flush_interrupts(vcpu);

                /* Update HVIP CSR for current CPU */
                kvm_riscv_update_hvip(vcpu);

                if (ret <= 0 ||
                    kvm_riscv_stage2_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
                    kvm_request_pending(vcpu)) {
                        vcpu->mode = OUTSIDE_GUEST_MODE;
                        local_irq_enable();
                        preempt_enable();
                        kvm_vcpu_srcu_read_lock(vcpu);
                        continue;
                }

                guest_timing_enter_irqoff();

                kvm_riscv_vcpu_enter_exit(vcpu);

                vcpu->mode = OUTSIDE_GUEST_MODE;
                vcpu->stat.exits++;

                /*
                 * Save SCAUSE, STVAL, HTVAL, and HTINST because we might
                 * get an interrupt between __kvm_riscv_switch_to() and
                 * local_irq_enable() which can potentially change CSRs.
                 */
                trap.sepc = vcpu->arch.guest_context.sepc;
                trap.scause = csr_read(CSR_SCAUSE);
                trap.stval = csr_read(CSR_STVAL);
                trap.htval = csr_read(CSR_HTVAL);
                trap.htinst = csr_read(CSR_HTINST);

                /* Sync up the interrupt state with HW */
                kvm_riscv_vcpu_sync_interrupts(vcpu);

                /*
                 * We must ensure that any pending interrupts are taken before
                 * we exit guest timing so that timer ticks are accounted as
                 * guest time. Transiently unmask interrupts so that any
                 * pending interrupts are taken.
                 *
                 * There's no barrier which ensures that pending interrupts are
                 * recognised, so we just hope that the CPU takes any pending
                 * interrupts between the enable and disable.
                 */
                local_irq_enable();
                local_irq_disable();

                guest_timing_exit_irqoff();

                local_irq_enable();

                preempt_enable();

                kvm_vcpu_srcu_read_lock(vcpu);

                ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
        }

        kvm_sigset_deactivate(vcpu);

        vcpu_put(vcpu);

        kvm_vcpu_srcu_read_unlock(vcpu);

        return ret;
}