Lines matching "sync", "update", "mask" (full-word search) in arch/riscv/kvm/vcpu.c
1 // SPDX-License-Identifier: GPL-2.0
10 #include <linux/entry-kvm.h>
47 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_reset_vcpu()
48 struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr; in kvm_riscv_reset_vcpu()
49 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; in kvm_riscv_reset_vcpu()
50 struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context; in kvm_riscv_reset_vcpu()
59 loaded = (vcpu->cpu != -1); in kvm_riscv_reset_vcpu()
63 vcpu->arch.last_exit_cpu = -1; in kvm_riscv_reset_vcpu()
77 bitmap_zero(vcpu->arch.irqs_pending, KVM_RISCV_VCPU_NR_IRQS); in kvm_riscv_reset_vcpu()
78 bitmap_zero(vcpu->arch.irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS); in kvm_riscv_reset_vcpu()
82 vcpu->arch.hfence_head = 0; in kvm_riscv_reset_vcpu()
83 vcpu->arch.hfence_tail = 0; in kvm_riscv_reset_vcpu()
84 memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue)); in kvm_riscv_reset_vcpu()
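
A minimal userspace sketch of the reset pattern visible above (all names and types are illustrative, not the kernel's): the vcpu keeps a pristine copy of its boot-time state, and a reset temporarily unloads a loaded vcpu (vcpu->cpu != -1), copies the pristine copy over the live state, and loads it again.

#include <stdio.h>
#include <string.h>

struct cpu_context { unsigned long sstatus, hstatus, sepc; };

struct vcpu {
	int cpu;                      /* -1 when not loaded on a host CPU */
	struct cpu_context ctx;       /* live register state */
	struct cpu_context reset_ctx; /* pristine copy captured at create time */
};

static void vcpu_put(struct vcpu *v)  { v->cpu = -1; }
static void vcpu_load(struct vcpu *v) { v->cpu = 0;  }

static void vcpu_reset(struct vcpu *v)
{
	/* If the vcpu is currently loaded, unload it first so the live
	 * state is not overwritten behind the host CPU's back. */
	int loaded = (v->cpu != -1);

	if (loaded)
		vcpu_put(v);

	memcpy(&v->ctx, &v->reset_ctx, sizeof(v->ctx)); /* restore pristine state */

	if (loaded)
		vcpu_load(v);
}

int main(void)
{
	struct vcpu v = { .cpu = 0, .reset_ctx = { .sstatus = 0x120 } };

	v.ctx.sepc = 0xdeadbeef; /* dirty the live state */
	vcpu_reset(&v);
	printf("sepc=%#lx sstatus=%#lx\n", v.ctx.sepc, v.ctx.sstatus);
	return 0;
}
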
101 struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr; in kvm_arch_vcpu_create()
103 spin_lock_init(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_create()
106 vcpu->arch.ran_atleast_once = false; in kvm_arch_vcpu_create()
107 vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO; in kvm_arch_vcpu_create()
108 bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX); in kvm_arch_vcpu_create()
114 vcpu->arch.mvendorid = sbi_get_mvendorid(); in kvm_arch_vcpu_create()
115 vcpu->arch.marchid = sbi_get_marchid(); in kvm_arch_vcpu_create()
116 vcpu->arch.mimpid = sbi_get_mimpid(); in kvm_arch_vcpu_create()
119 spin_lock_init(&vcpu->arch.hfence_lock); in kvm_arch_vcpu_create()
122 cntx = &vcpu->arch.guest_reset_context; in kvm_arch_vcpu_create()
123 cntx->sstatus = SR_SPP | SR_SPIE; in kvm_arch_vcpu_create()
124 cntx->hstatus = 0; in kvm_arch_vcpu_create()
125 cntx->hstatus |= HSTATUS_VTW; in kvm_arch_vcpu_create()
126 cntx->hstatus |= HSTATUS_SPVP; in kvm_arch_vcpu_create()
127 cntx->hstatus |= HSTATUS_SPV; in kvm_arch_vcpu_create()
130 return -ENOMEM; in kvm_arch_vcpu_create()
133 reset_csr->scounteren = 0x7; in kvm_arch_vcpu_create()
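
Brief note on the constants above: per the RISC-V privileged specification, scounteren = 0x7 sets the CY, TM, and IR bits, letting the guest read the cycle, time, and instret counters; in the hstatus value built earlier, SPV marks the previous mode as virtualized, SPVP makes the previous virtual privilege VS-mode, and VTW traps the guest's WFI instruction to the hypervisor.
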
156 * Keep all vcpus with non-zero id in power-off state so that in kvm_arch_vcpu_postcreate()
159 if (vcpu->vcpu_idx != 0) in kvm_arch_vcpu_postcreate()
173 /* Free unused pages pre-allocated for G-stage page table mappings */ in kvm_arch_vcpu_destroy()
174 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); in kvm_arch_vcpu_destroy()
197 return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) && in kvm_arch_vcpu_runnable()
198 !kvm_riscv_vcpu_stopped(vcpu) && !vcpu->arch.pause); in kvm_arch_vcpu_runnable()
208 return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false; in kvm_arch_vcpu_in_kernel()
219 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl()
226 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
234 return -ENOIOCTLCMD; in kvm_arch_vcpu_async_ioctl()
240 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
242 long r = -EINVAL; in kvm_arch_vcpu_ioctl()
249 r = -EFAULT; in kvm_arch_vcpu_ioctl()
264 r = -EFAULT; in kvm_arch_vcpu_ioctl()
271 r = -E2BIG; in kvm_arch_vcpu_ioctl()
274 r = kvm_riscv_vcpu_copy_reg_indices(vcpu, user_list->reg); in kvm_arch_vcpu_ioctl()
287 return -EINVAL; in kvm_arch_vcpu_ioctl_get_sregs()
293 return -EINVAL; in kvm_arch_vcpu_ioctl_set_sregs()
298 return -EINVAL; in kvm_arch_vcpu_ioctl_get_fpu()
303 return -EINVAL; in kvm_arch_vcpu_ioctl_set_fpu()
309 return -EINVAL; in kvm_arch_vcpu_ioctl_translate()
314 return -EINVAL; in kvm_arch_vcpu_ioctl_get_regs()
319 return -EINVAL; in kvm_arch_vcpu_ioctl_set_regs()
324 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_vcpu_flush_interrupts()
325 unsigned long mask, val; in kvm_riscv_vcpu_flush_interrupts() local
327 if (READ_ONCE(vcpu->arch.irqs_pending_mask[0])) { in kvm_riscv_vcpu_flush_interrupts()
328 mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[0], 0); in kvm_riscv_vcpu_flush_interrupts()
329 val = READ_ONCE(vcpu->arch.irqs_pending[0]) & mask; in kvm_riscv_vcpu_flush_interrupts()
331 csr->hvip &= ~mask; in kvm_riscv_vcpu_flush_interrupts()
332 csr->hvip |= val; in kvm_riscv_vcpu_flush_interrupts()
342 struct kvm_vcpu_arch *v = &vcpu->arch; in kvm_riscv_vcpu_sync_interrupts()
343 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_vcpu_sync_interrupts()
346 csr->vsie = csr_read(CSR_VSIE); in kvm_riscv_vcpu_sync_interrupts()
348 /* Sync-up HVIP.VSSIP bit changes done by Guest */ in kvm_riscv_vcpu_sync_interrupts()
350 if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) { in kvm_riscv_vcpu_sync_interrupts()
353 v->irqs_pending_mask)) in kvm_riscv_vcpu_sync_interrupts()
354 set_bit(IRQ_VS_SOFT, v->irqs_pending); in kvm_riscv_vcpu_sync_interrupts()
357 v->irqs_pending_mask)) in kvm_riscv_vcpu_sync_interrupts()
358 clear_bit(IRQ_VS_SOFT, v->irqs_pending); in kvm_riscv_vcpu_sync_interrupts()
362 /* Sync-up AIA high interrupts */ in kvm_riscv_vcpu_sync_interrupts()
365 /* Sync-up timer CSRs */ in kvm_riscv_vcpu_sync_interrupts()
372 * We only allow VS-mode software, timer, and external in kvm_riscv_vcpu_set_interrupt()
374 * defined by RISC-V privilege specification. in kvm_riscv_vcpu_set_interrupt()
380 return -EINVAL; in kvm_riscv_vcpu_set_interrupt()
382 set_bit(irq, vcpu->arch.irqs_pending); in kvm_riscv_vcpu_set_interrupt()
384 set_bit(irq, vcpu->arch.irqs_pending_mask); in kvm_riscv_vcpu_set_interrupt()
394 * We only allow VS-mode software, timer, and external in kvm_riscv_vcpu_unset_interrupt()
396 * defined by RISC-V privilege specification. in kvm_riscv_vcpu_unset_interrupt()
402 return -EINVAL; in kvm_riscv_vcpu_unset_interrupt()
404 clear_bit(irq, vcpu->arch.irqs_pending); in kvm_riscv_vcpu_unset_interrupt()
406 set_bit(irq, vcpu->arch.irqs_pending_mask); in kvm_riscv_vcpu_unset_interrupt()
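
The four fragments above implement one protocol around two bitmaps: the set/unset paths publish a level change in irqs_pending and then flag the bit in irqs_pending_mask; the flush path claims all flagged bits with one atomic exchange and rewrites exactly those bits in the HVIP shadow; the sync path folds back a VSSIP toggle the guest performed on its own. A simplified, single-threaded userspace model follows (illustrative names; the kernel additionally relies on per-bit atomics and acquire ordering):

#include <stdio.h>
#include <stdatomic.h>

#define IRQ_VS_SOFT 2 /* illustrative: bit position of the VS software IRQ */

static _Atomic unsigned long irqs_pending;      /* requested level per IRQ */
static _Atomic unsigned long irqs_pending_mask; /* bits changed since last flush */
static unsigned long hvip;                      /* software shadow of the HVIP CSR */

/* cf. kvm_riscv_vcpu_set_interrupt(): publish the level, then the change bit */
static void set_irq(int irq)
{
	atomic_fetch_or(&irqs_pending, 1UL << irq);
	atomic_fetch_or(&irqs_pending_mask, 1UL << irq);
}

/* cf. kvm_riscv_vcpu_unset_interrupt() */
static void unset_irq(int irq)
{
	atomic_fetch_and(&irqs_pending, ~(1UL << irq));
	atomic_fetch_or(&irqs_pending_mask, 1UL << irq);
}

/* cf. kvm_riscv_vcpu_flush_interrupts(): claim all changed bits atomically,
 * then rewrite exactly those bits in the HVIP shadow */
static void flush_irqs(void)
{
	unsigned long mask, val;

	if (!atomic_load(&irqs_pending_mask))
		return;
	mask = atomic_exchange(&irqs_pending_mask, 0);
	val = atomic_load(&irqs_pending) & mask;
	hvip = (hvip & ~mask) | val;
}

/* cf. kvm_riscv_vcpu_sync_interrupts(): if the guest toggled VSSIP while it
 * ran (live HVIP differs from the shadow), fold the change back into the
 * pending bitmaps so the next flush does not reassert stale state */
static void sync_irqs(unsigned long live_hvip)
{
	if ((hvip ^ live_hvip) & (1UL << IRQ_VS_SOFT)) {
		if (live_hvip & (1UL << IRQ_VS_SOFT))
			set_irq(IRQ_VS_SOFT);
		else
			unset_irq(IRQ_VS_SOFT);
	}
}

int main(void)
{
	set_irq(IRQ_VS_SOFT);
	flush_irqs();
	printf("hvip=%#lx\n", hvip); /* 0x4: interrupt injected */
	sync_irqs(0);                /* guest cleared VSSIP itself */
	flush_irqs();
	printf("hvip=%#lx\n", hvip); /* 0x0 again */
	return 0;
}
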
411 bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask) in kvm_riscv_vcpu_has_interrupts() argument
415 ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK) in kvm_riscv_vcpu_has_interrupts()
416 << VSIP_TO_HVIP_SHIFT) & (unsigned long)mask; in kvm_riscv_vcpu_has_interrupts()
417 ie |= vcpu->arch.guest_csr.vsie & ~IRQ_LOCAL_MASK & in kvm_riscv_vcpu_has_interrupts()
418 (unsigned long)mask; in kvm_riscv_vcpu_has_interrupts()
419 if (READ_ONCE(vcpu->arch.irqs_pending[0]) & ie) in kvm_riscv_vcpu_has_interrupts()
423 return kvm_riscv_vcpu_aia_has_interrupts(vcpu, mask); in kvm_riscv_vcpu_has_interrupts()
428 WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED); in __kvm_riscv_vcpu_power_off()
435 spin_lock(&vcpu->arch.mp_state_lock); in kvm_riscv_vcpu_power_off()
437 spin_unlock(&vcpu->arch.mp_state_lock); in kvm_riscv_vcpu_power_off()
442 WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE); in __kvm_riscv_vcpu_power_on()
448 spin_lock(&vcpu->arch.mp_state_lock); in kvm_riscv_vcpu_power_on()
450 spin_unlock(&vcpu->arch.mp_state_lock); in kvm_riscv_vcpu_power_on()
455 return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED; in kvm_riscv_vcpu_stopped()
461 *mp_state = READ_ONCE(vcpu->arch.mp_state); in kvm_arch_vcpu_ioctl_get_mpstate()
471 spin_lock(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_ioctl_set_mpstate()
473 switch (mp_state->mp_state) { in kvm_arch_vcpu_ioctl_set_mpstate()
475 WRITE_ONCE(vcpu->arch.mp_state, *mp_state); in kvm_arch_vcpu_ioctl_set_mpstate()
481 ret = -EINVAL; in kvm_arch_vcpu_ioctl_set_mpstate()
484 spin_unlock(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_ioctl_set_mpstate()
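
A sketch of the mp_state discipline visible above (userspace model, illustrative names): state transitions are serialized by a lock, with __-prefixed variants for callers that already hold it, while the stopped() predicate is a single lock-free atomic read, mirroring the kernel's READ_ONCE()/WRITE_ONCE() pairing.

#include <stdio.h>
#include <stdatomic.h>
#include <pthread.h>

enum mp_state { MP_RUNNABLE, MP_STOPPED };

static pthread_mutex_t mp_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic int mp_state = MP_RUNNABLE;

/* Writers serialize on the lock (cf. kvm_riscv_vcpu_power_off()); the
 * __-prefixed helper is for callers that already hold it. */
static void __power_off(void) { atomic_store(&mp_state, MP_STOPPED); }

static void power_off(void)
{
	pthread_mutex_lock(&mp_lock);
	__power_off();
	pthread_mutex_unlock(&mp_lock);
}

/* Readers need no lock: one atomic load answers a yes/no question
 * (cf. kvm_riscv_vcpu_stopped() using READ_ONCE()). */
static int stopped(void)
{
	return atomic_load(&mp_state) == MP_STOPPED;
}

int main(void)
{
	power_off();
	printf("stopped=%d\n", stopped());
	return 0;
}
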
493 return -EINVAL; in kvm_arch_vcpu_ioctl_set_guest_debug()
520 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_arch_vcpu_load()
522 csr_write(CSR_VSSTATUS, csr->vsstatus); in kvm_arch_vcpu_load()
523 csr_write(CSR_VSIE, csr->vsie); in kvm_arch_vcpu_load()
524 csr_write(CSR_VSTVEC, csr->vstvec); in kvm_arch_vcpu_load()
525 csr_write(CSR_VSSCRATCH, csr->vsscratch); in kvm_arch_vcpu_load()
526 csr_write(CSR_VSEPC, csr->vsepc); in kvm_arch_vcpu_load()
527 csr_write(CSR_VSCAUSE, csr->vscause); in kvm_arch_vcpu_load()
528 csr_write(CSR_VSTVAL, csr->vstval); in kvm_arch_vcpu_load()
529 csr_write(CSR_HVIP, csr->hvip); in kvm_arch_vcpu_load()
530 csr_write(CSR_VSATP, csr->vsatp); in kvm_arch_vcpu_load()
532 kvm_riscv_vcpu_update_config(vcpu->arch.isa); in kvm_arch_vcpu_load()
538 kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context); in kvm_arch_vcpu_load()
539 kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context, in kvm_arch_vcpu_load()
540 vcpu->arch.isa); in kvm_arch_vcpu_load()
541 kvm_riscv_vcpu_host_vector_save(&vcpu->arch.host_context); in kvm_arch_vcpu_load()
542 kvm_riscv_vcpu_guest_vector_restore(&vcpu->arch.guest_context, in kvm_arch_vcpu_load()
543 vcpu->arch.isa); in kvm_arch_vcpu_load()
547 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
552 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_arch_vcpu_put()
554 vcpu->cpu = -1; in kvm_arch_vcpu_put()
558 kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context, in kvm_arch_vcpu_put()
559 vcpu->arch.isa); in kvm_arch_vcpu_put()
560 kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context); in kvm_arch_vcpu_put()
563 kvm_riscv_vcpu_guest_vector_save(&vcpu->arch.guest_context, in kvm_arch_vcpu_put()
564 vcpu->arch.isa); in kvm_arch_vcpu_put()
565 kvm_riscv_vcpu_host_vector_restore(&vcpu->arch.host_context); in kvm_arch_vcpu_put()
567 csr->vsstatus = csr_read(CSR_VSSTATUS); in kvm_arch_vcpu_put()
568 csr->vsie = csr_read(CSR_VSIE); in kvm_arch_vcpu_put()
569 csr->vstvec = csr_read(CSR_VSTVEC); in kvm_arch_vcpu_put()
570 csr->vsscratch = csr_read(CSR_VSSCRATCH); in kvm_arch_vcpu_put()
571 csr->vsepc = csr_read(CSR_VSEPC); in kvm_arch_vcpu_put()
572 csr->vscause = csr_read(CSR_VSCAUSE); in kvm_arch_vcpu_put()
573 csr->vstval = csr_read(CSR_VSTVAL); in kvm_arch_vcpu_put()
574 csr->hvip = csr_read(CSR_HVIP); in kvm_arch_vcpu_put()
575 csr->vsatp = csr_read(CSR_VSATP); in kvm_arch_vcpu_put()
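
The load/put pair above follows a shadow-CSR pattern: load installs the vcpu's saved CSR values into the hardware, and put reads them back because the guest may have changed them while running. A userspace model with two toy CSRs (illustrative only, not the kernel's API):

#include <stdio.h>

/* Userspace model: "hardware" CSRs are globals; each vcpu carries a shadow. */
static unsigned long hw_vsstatus, hw_vsie;

struct vcpu_csr { unsigned long vsstatus, vsie; };

/* cf. kvm_arch_vcpu_load(): install this vcpu's shadow into the hardware */
static void vcpu_load(struct vcpu_csr *csr)
{
	hw_vsstatus = csr->vsstatus;
	hw_vsie = csr->vsie;
}

/* cf. kvm_arch_vcpu_put(): capture the hardware state back into the shadow,
 * since the guest may have modified these CSRs while it ran */
static void vcpu_put(struct vcpu_csr *csr)
{
	csr->vsstatus = hw_vsstatus;
	csr->vsie = hw_vsie;
}

int main(void)
{
	struct vcpu_csr a = { .vsie = 0x222 }, b = { .vsie = 0x0 };

	vcpu_load(&a);
	hw_vsie |= 0x4; /* guest enables an interrupt while running */
	vcpu_put(&a);
	vcpu_load(&b);  /* switch to another vcpu: b's shadow goes in */
	printf("a.vsie=%#lx hw_vsie=%#lx\n", a.vsie, hw_vsie);
	return 0;
}
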
586 (!kvm_riscv_vcpu_stopped(vcpu)) && (!vcpu->arch.pause), in kvm_riscv_check_vcpu_requests()
590 if (kvm_riscv_vcpu_stopped(vcpu) || vcpu->arch.pause) { in kvm_riscv_check_vcpu_requests()
625 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_update_hvip()
627 csr_write(CSR_HVIP, csr->hvip); in kvm_riscv_update_hvip()
641 __kvm_riscv_switch_to(&vcpu->arch); in kvm_riscv_vcpu_enter_exit()
642 vcpu->arch.last_exit_cpu = vcpu->cpu; in kvm_riscv_vcpu_enter_exit()
650 struct kvm_run *run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
653 vcpu->arch.ran_atleast_once = true; in kvm_arch_vcpu_ioctl_run()
657 switch (run->exit_reason) { in kvm_arch_vcpu_ioctl_run()
659 /* Process MMIO value returned from user-space */ in kvm_arch_vcpu_ioctl_run()
660 ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run); in kvm_arch_vcpu_ioctl_run()
663 /* Process SBI value returned from user-space */ in kvm_arch_vcpu_ioctl_run()
664 ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run); in kvm_arch_vcpu_ioctl_run()
667 /* Process CSR value returned from user-space */ in kvm_arch_vcpu_ioctl_run()
668 ret = kvm_riscv_vcpu_csr_return(vcpu, vcpu->run); in kvm_arch_vcpu_ioctl_run()
679 if (run->immediate_exit) { in kvm_arch_vcpu_ioctl_run()
681 return -EINTR; in kvm_arch_vcpu_ioctl_run()
689 run->exit_reason = KVM_EXIT_UNKNOWN; in kvm_arch_vcpu_ioctl_run()
703 /* Update AIA HW state before entering guest */ in kvm_arch_vcpu_ioctl_run()
716 * Documentation/virt/kvm/vcpu-requests.rst in kvm_arch_vcpu_ioctl_run()
718 vcpu->mode = IN_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
725 * so update it in HW. in kvm_arch_vcpu_ioctl_run()
729 /* Update HVIP CSR for current CPU */ in kvm_arch_vcpu_ioctl_run()
733 kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) || in kvm_arch_vcpu_ioctl_run()
736 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
746 * Note: This should be done after G-stage VMID has been in kvm_arch_vcpu_ioctl_run()
755 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
756 vcpu->stat.exits++; in kvm_arch_vcpu_ioctl_run()
763 trap.sepc = vcpu->arch.guest_context.sepc; in kvm_arch_vcpu_ioctl_run()
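
The run-loop fragment above follows the request-checking protocol described in Documentation/virt/kvm/vcpu-requests.rst: the vcpu mode is set to IN_GUEST_MODE with interrupts disabled before a final request check, so a remote request either lands before the check (and aborts the entry) or finds the vcpu in guest mode and kicks it out with an IPI. A minimal single-threaded model of that ordering (stubs, not kernel code):

#include <stdio.h>
#include <stdatomic.h>
#include <stdbool.h>

enum vcpu_mode { OUTSIDE_GUEST_MODE, IN_GUEST_MODE };

static _Atomic int mode = OUTSIDE_GUEST_MODE;
static _Atomic unsigned long requests; /* pending vcpu-request bitmap */

static void local_irq_disable(void) { /* stub for the model */ }
static void local_irq_enable(void)  { /* stub for the model */ }
static void enter_guest(void)       { printf("entered guest\n"); }

static bool run_once(void)
{
	local_irq_disable();

	/* Publish IN_GUEST_MODE before the final request check, so a remote
	 * kicker that sets a request after this point also sees the mode and
	 * sends an IPI to force an exit. */
	atomic_store(&mode, IN_GUEST_MODE);

	if (atomic_load(&requests)) {
		/* Lost the race: back out and handle the request instead. */
		atomic_store(&mode, OUTSIDE_GUEST_MODE);
		local_irq_enable();
		return false;
	}

	enter_guest();

	atomic_store(&mode, OUTSIDE_GUEST_MODE);
	local_irq_enable();
	return true;
}

int main(void)
{
	atomic_fetch_or(&requests, 1UL); /* a request arrives before entry */
	printf("entered=%d\n", run_once());
	atomic_store(&requests, 0);
	printf("entered=%d\n", run_once());
	return 0;
}
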