Lines matching refs:arch in arch/riscv/kvm/vcpu.c (format: file line number, matched source line, enclosing function):

47 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_reset_vcpu()
48 struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr; in kvm_riscv_reset_vcpu()
49 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; in kvm_riscv_reset_vcpu()
50 struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context; in kvm_riscv_reset_vcpu()
63 vcpu->arch.last_exit_cpu = -1; in kvm_riscv_reset_vcpu()
77 bitmap_zero(vcpu->arch.irqs_pending, KVM_RISCV_VCPU_NR_IRQS); in kvm_riscv_reset_vcpu()
78 bitmap_zero(vcpu->arch.irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS); in kvm_riscv_reset_vcpu()
82 vcpu->arch.hfence_head = 0; in kvm_riscv_reset_vcpu()
83 vcpu->arch.hfence_tail = 0; in kvm_riscv_reset_vcpu()
84 memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue)); in kvm_riscv_reset_vcpu()
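
Read together, the kvm_riscv_reset_vcpu() matches outline the reset path: rebuild the live CSR and context structures from their saved reset copies, clear the pending-interrupt bitmaps, and drain the per-vCPU HFENCE queue. A minimal sketch of how the matched lines plausibly fit together (the memcpy step and anything else not shown in the matches above is an assumption, not quoted output):

static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;

	/* Assumed: live state is rebuilt from the reset-time copies. */
	memcpy(csr, reset_csr, sizeof(*csr));
	memcpy(cntx, reset_cntx, sizeof(*cntx));

	/* Matched: forget which physical CPU this vCPU last exited on. */
	vcpu->arch.last_exit_cpu = -1;

	/* Matched: no interrupts are pending after reset. */
	bitmap_zero(vcpu->arch.irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
	bitmap_zero(vcpu->arch.irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);

	/* Matched: empty the HFENCE request ring. */
	vcpu->arch.hfence_head = 0;
	vcpu->arch.hfence_tail = 0;
	memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));
}
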
101 struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr; in kvm_arch_vcpu_create()
104 vcpu->arch.ran_atleast_once = false; in kvm_arch_vcpu_create()
105 vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO; in kvm_arch_vcpu_create()
106 bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX); in kvm_arch_vcpu_create()
112 vcpu->arch.mvendorid = sbi_get_mvendorid(); in kvm_arch_vcpu_create()
113 vcpu->arch.marchid = sbi_get_marchid(); in kvm_arch_vcpu_create()
114 vcpu->arch.mimpid = sbi_get_mimpid(); in kvm_arch_vcpu_create()
117 spin_lock_init(&vcpu->arch.hfence_lock); in kvm_arch_vcpu_create()
120 cntx = &vcpu->arch.guest_reset_context; in kvm_arch_vcpu_create()
172 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); in kvm_arch_vcpu_destroy()
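
The kvm_arch_vcpu_create() matches show one-time per-vCPU setup: flags and caches initialized, an empty ISA bitmap, machine identity registers mirrored from the host via SBI, the HFENCE lock, and the reset context that kvm_riscv_reset_vcpu() later copies from. The lone kvm_arch_vcpu_destroy() match (line 172) is the mirror image, releasing the MMU page cache. A sketch assembling the matched lines; error handling and the reset-context field values are assumptions:

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
	struct kvm_cpu_context *cntx;

	vcpu->arch.ran_atleast_once = false;
	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO; /* zeroed G-stage pages */
	bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);

	/* Matched: mirror the host's machine identity via SBI. */
	vcpu->arch.mvendorid = sbi_get_mvendorid();
	vcpu->arch.marchid = sbi_get_marchid();
	vcpu->arch.mimpid = sbi_get_mimpid();

	spin_lock_init(&vcpu->arch.hfence_lock);

	/* Matched: seed the context that reset copies from. */
	cntx = &vcpu->arch.guest_reset_context;
	cntx->sstatus = SR_SPP | SR_SPIE;	/* assumed reset value */
	reset_csr->scounteren = 0x7;		/* assumed reset value */

	return 0;
}
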
196 !vcpu->arch.power_off && !vcpu->arch.pause); in kvm_arch_vcpu_runnable()
206 return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false; in kvm_arch_vcpu_in_kernel()
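
Both predicates matched here reduce to single expressions. A sketch with assumed function bodies around the matched expressions:

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* Runnable only if an interrupt is deliverable and the vCPU is
	 * neither powered off (SBI HSM stopped) nor paused by the VMM. */
	return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
		!vcpu->arch.power_off && !vcpu->arch.pause);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	/* SPP in the guest sstatus latches the privilege level prior to
	 * the trap: set means the guest was running kernel code. */
	return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;
}

The kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) call is an assumption consistent with the match at line 196; the listing itself only shows the flag checks.
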
322 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_vcpu_flush_interrupts()
325 if (READ_ONCE(vcpu->arch.irqs_pending_mask[0])) { in kvm_riscv_vcpu_flush_interrupts()
326 mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[0], 0); in kvm_riscv_vcpu_flush_interrupts()
327 val = READ_ONCE(vcpu->arch.irqs_pending[0]) & mask; in kvm_riscv_vcpu_flush_interrupts()
340 struct kvm_vcpu_arch *v = &vcpu->arch; in kvm_riscv_vcpu_sync_interrupts()
341 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_vcpu_sync_interrupts()
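
kvm_riscv_vcpu_flush_interrupts() pushes software-pending interrupts toward the guest: irqs_pending_mask records which bits changed since the last flush, and xchg_acquire() claims those bits atomically against the set/unset paths at lines 380-404. kvm_riscv_vcpu_sync_interrupts() works in the opposite direction after a guest exit (only its local declarations matched here). A sketch of the flush core; folding the claimed bits into an hvip shadow is an assumption:

void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long mask, val;

	if (READ_ONCE(vcpu->arch.irqs_pending_mask[0])) {
		/* Matched: atomically claim the bits dirtied since last flush. */
		mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[0], 0);
		val = READ_ONCE(vcpu->arch.irqs_pending[0]) & mask;

		/* Assumed: apply only the claimed bits to the HVIP shadow. */
		csr->hvip &= ~mask;
		csr->hvip |= val;
	}
}
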
380 set_bit(irq, vcpu->arch.irqs_pending); in kvm_riscv_vcpu_set_interrupt()
382 set_bit(irq, vcpu->arch.irqs_pending_mask); in kvm_riscv_vcpu_set_interrupt()
402 clear_bit(irq, vcpu->arch.irqs_pending); in kvm_riscv_vcpu_unset_interrupt()
404 set_bit(irq, vcpu->arch.irqs_pending_mask); in kvm_riscv_vcpu_unset_interrupt()
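
Set and unset follow the same two-step protocol: update irqs_pending first, then raise the matching irqs_pending_mask bit so the flush path above notices the change. A sketch; the memory barrier, the kick, and the return values are assumptions:

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	set_bit(irq, vcpu->arch.irqs_pending);
	smp_mb__before_atomic();	/* assumed: order pending vs. mask update */
	set_bit(irq, vcpu->arch.irqs_pending_mask);
	kvm_vcpu_kick(vcpu);		/* assumed: nudge a running vCPU */
	return 0;
}

int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	clear_bit(irq, vcpu->arch.irqs_pending);
	smp_mb__before_atomic();	/* assumed */
	set_bit(irq, vcpu->arch.irqs_pending_mask);
	return 0;
}
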
413 ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK) in kvm_riscv_vcpu_has_interrupts()
415 ie |= vcpu->arch.guest_csr.vsie & ~IRQ_LOCAL_MASK & in kvm_riscv_vcpu_has_interrupts()
417 if (READ_ONCE(vcpu->arch.irqs_pending[0]) & ie) in kvm_riscv_vcpu_has_interrupts()
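
The matched lines build an enabled-interrupt mask from the guest's vsie, local interrupts filtered by VSIP_VALID_MASK plus any non-local bits, and test it against irqs_pending. A sketch; the shift translating VSIE bit positions to HVIP positions falls outside the matched text and is an assumption:

bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
	unsigned long ie;

	ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
	      << VSIP_TO_HVIP_SHIFT) & (unsigned long)mask; /* shift assumed */
	ie |= vcpu->arch.guest_csr.vsie & ~IRQ_LOCAL_MASK &
	      (unsigned long)mask;
	if (READ_ONCE(vcpu->arch.irqs_pending[0]) & ie)
		return true;

	return false;
}
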
426 vcpu->arch.power_off = true; in kvm_riscv_vcpu_power_off()
433 vcpu->arch.power_off = false; in kvm_riscv_vcpu_power_on()
440 if (vcpu->arch.power_off) in kvm_arch_vcpu_ioctl_get_mpstate()
455 vcpu->arch.power_off = false; in kvm_arch_vcpu_ioctl_set_mpstate()
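
power_off is a plain flag: the runnable check (line 196) and the sleep loop (lines 564-568) consult it, and the mp-state ioctls translate it to and from the generic KVM_MP_STATE_* values. A sketch; the request/kick/wake calls are assumptions about how a running vCPU is made to notice the change:

void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);	/* assumed */
	kvm_vcpu_kick(vcpu);			/* assumed */
}

void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = false;
	kvm_vcpu_wake_up(vcpu);			/* assumed */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	mp_state->mp_state = vcpu->arch.power_off ?
			     KVM_MP_STATE_STOPPED : KVM_MP_STATE_RUNNABLE;
	return 0;
}
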
498 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_arch_vcpu_load()
510 kvm_riscv_vcpu_update_config(vcpu->arch.isa); in kvm_arch_vcpu_load()
516 kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context); in kvm_arch_vcpu_load()
517 kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context, in kvm_arch_vcpu_load()
518 vcpu->arch.isa); in kvm_arch_vcpu_load()
519 kvm_riscv_vcpu_host_vector_save(&vcpu->arch.host_context); in kvm_arch_vcpu_load()
520 kvm_riscv_vcpu_guest_vector_restore(&vcpu->arch.guest_context, in kvm_arch_vcpu_load()
521 vcpu->arch.isa); in kvm_arch_vcpu_load()
530 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_arch_vcpu_put()
536 kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context, in kvm_arch_vcpu_put()
537 vcpu->arch.isa); in kvm_arch_vcpu_put()
538 kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context); in kvm_arch_vcpu_put()
541 kvm_riscv_vcpu_guest_vector_save(&vcpu->arch.guest_context, in kvm_arch_vcpu_put()
542 vcpu->arch.isa); in kvm_arch_vcpu_put()
543 kvm_riscv_vcpu_host_vector_restore(&vcpu->arch.host_context); in kvm_arch_vcpu_put()
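
Lines 498-521 and 530-543 are symmetric halves of the state switch: kvm_arch_vcpu_load() saves host FP and vector state and restores the guest's, gated by the vCPU's ISA bitmap (line 106), and kvm_arch_vcpu_put() is the exact mirror. The csr pointers at lines 498 and 530 suggest guest CSRs are written to and read back from hardware around these calls. A sketch of the pairing; everything outside the matched calls is assumed:

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_riscv_vcpu_update_config(vcpu->arch.isa);

	kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
	kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
					vcpu->arch.isa);
	kvm_riscv_vcpu_host_vector_save(&vcpu->arch.host_context);
	kvm_riscv_vcpu_guest_vector_restore(&vcpu->arch.guest_context,
					    vcpu->arch.isa);
	/* ... assumed: guest CSRs loaded into hardware here ... */
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
				     vcpu->arch.isa);
	kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);
	kvm_riscv_vcpu_guest_vector_save(&vcpu->arch.guest_context,
					 vcpu->arch.isa);
	kvm_riscv_vcpu_host_vector_restore(&vcpu->arch.host_context);
	/* ... assumed: guest CSRs read back from hardware here ... */
}
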
564 (!vcpu->arch.power_off) && (!vcpu->arch.pause), in kvm_riscv_check_vcpu_requests()
568 if (vcpu->arch.power_off || vcpu->arch.pause) { in kvm_riscv_check_vcpu_requests()
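
These two matches show the blocking semantics: a powered-off or paused vCPU parks on its rcuwait until both flags clear, and the post-wait recheck (line 568) handles a wakeup by signal. A sketch; the surrounding request handling is assumed:

static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {	/* assumed request */
		/* Matched: sleep until neither powered off nor paused. */
		rcuwait_wait_event(wait,
			(!vcpu->arch.power_off) && (!vcpu->arch.pause),
			TASK_INTERRUPTIBLE);

		if (vcpu->arch.power_off || vcpu->arch.pause) {
			/* Assumed: woken by a signal; keep the request
			 * pending so we sleep again on the next pass. */
			kvm_make_request(KVM_REQ_SLEEP, vcpu);
		}
	}
	/* ... assumed: other requests (vCPU reset, fences, HVIP update) ... */
}
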
603 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_update_hvip()
619 __kvm_riscv_switch_to(&vcpu->arch); in kvm_riscv_vcpu_enter_exit()
620 vcpu->arch.last_exit_cpu = vcpu->cpu; in kvm_riscv_vcpu_enter_exit()
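
Line 603 refreshes the hardware HVIP CSR from the shadow maintained by the flush path, and lines 619-620 are the world switch itself, recording the physical CPU of the exit so later fences can target stale TLB state (reset clears this field at line 63). A sketch; the noinstr annotation and the guest-state bracketing are assumptions:

static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
	guest_state_enter_irqoff();		/* assumed */
	__kvm_riscv_switch_to(&vcpu->arch);	/* matched: the world switch */
	vcpu->arch.last_exit_cpu = vcpu->cpu;	/* matched: for targeted fences */
	guest_state_exit_irqoff();		/* assumed */
}
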
631 vcpu->arch.ran_atleast_once = true; in kvm_arch_vcpu_ioctl_run()
711 kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) || in kvm_arch_vcpu_ioctl_run()
741 trap.sepc = vcpu->arch.guest_context.sepc; in kvm_arch_vcpu_ioctl_run()
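
Finally, the kvm_arch_vcpu_ioctl_run() matches mark the vCPU as having run at least once (line 631), re-check the G-stage VMID version just before entry (line 711), and snapshot the guest PC for the exit handler (line 741). A skeleton of how those three points sit in a conventional KVM run loop; all control flow between them is assumed:

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int ret = 1;
	struct kvm_cpu_trap trap;

	vcpu->arch.ran_atleast_once = true;	/* matched, line 631 */

	while (ret > 0) {
		/* ... assumed: handle requests, flush interrupts, IRQs off ... */

		/* Matched: retry if the global VMID version moved while
		 * we were preparing to enter the guest. */
		if (kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid))
			continue;

		kvm_riscv_vcpu_enter_exit(vcpu);

		/* Matched: capture trap state for the exit handler
		 * (scause/stval/htval/htinst read alongside, assumed). */
		trap.sepc = vcpu->arch.guest_context.sepc;

		ret = 0;	/* assumed: exit handling decides whether to loop */
	}
	return ret;
}
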