/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Atish Patra <atish.patra@wdc.com>
 *	Anup Patel <anup.patel@wdc.com>
 */

#ifndef __KVM_VCPU_RISCV_FP_H
#define __KVM_VCPU_RISCV_FP_H

#include <linux/types.h>

struct kvm_vcpu;
struct kvm_cpu_context;

#ifdef CONFIG_FPU
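/*
 * Low-level helpers that save/restore the raw F (single-precision) and
 * D (double-precision) extension register file to/from a vCPU context.
 */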
void __kvm_riscv_fp_f_save(struct kvm_cpu_context *context);
void __kvm_riscv_fp_f_restore(struct kvm_cpu_context *context);
void __kvm_riscv_fp_d_save(struct kvm_cpu_context *context);
void __kvm_riscv_fp_d_restore(struct kvm_cpu_context *context);

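/*
 * vCPU-level FP context management: reset on vCPU reset, and guest/host
 * state switching around guest entry/exit. The isa bitmap selects
 * whether F or D register state is saved/restored.
 */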
void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
				  const unsigned long *isa);
void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
				     const unsigned long *isa);
void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx);
void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx);
#else
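/* No-op stubs so callers need no #ifdefs when the kernel lacks FPU support. */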
static inline void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
{
}
static inline void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
						const unsigned long *isa)
{
}
static inline void kvm_riscv_vcpu_guest_fp_restore(
					struct kvm_cpu_context *cntx,
					const unsigned long *isa)
{
}
static inline void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx)
{
}
static inline void kvm_riscv_vcpu_host_fp_restore(
					struct kvm_cpu_context *cntx)
{
}
#endif

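/*
 * KVM_GET_ONE_REG / KVM_SET_ONE_REG accessors for guest FP registers;
 * rtype selects the F or D register set.
 */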
int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      unsigned long rtype);
int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      unsigned long rtype);

#endif