xref: /openbmc/linux/arch/riscv/include/asm/kvm_aia.h (revision 7f8256ae)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */
9 
10 #ifndef __KVM_RISCV_AIA_H
11 #define __KVM_RISCV_AIA_H
12 
13 #include <linux/jump_label.h>
14 #include <linux/kvm_types.h>
15 #include <asm/csr.h>
16 
/* Per-VM state of the AIA (Advanced Interrupt Architecture) irqchip */
struct kvm_aia {
	/* In-kernel irqchip created */
	bool		in_kernel;

	/* In-kernel irqchip initialized */
	bool		initialized;
};
24 
/*
 * Saved/restored AIA CSR context of a guest VCPU.
 *
 * Field names mirror the corresponding RISC-V AIA CSRs; the *h members
 * are presumably the RV32 high-half counterparts (cf. the CONFIG_32BIT
 * flush/sync helpers below) -- confirm against the AIA specification.
 */
struct kvm_vcpu_aia_csr {
	unsigned long vsiselect;	/* VS-level indirect register select */
	unsigned long hviprio1;		/* hypervisor VS interrupt priorities */
	unsigned long hviprio2;
	unsigned long vsieh;		/* high half of vsie (RV32) */
	unsigned long hviph;		/* high half of hvip (RV32) */
	unsigned long hviprio1h;
	unsigned long hviprio2h;
};
34 
/* Per-VCPU AIA state: live CSR context plus the values restored on reset */
struct kvm_vcpu_aia {
	/* CPU AIA CSR context of Guest VCPU */
	struct kvm_vcpu_aia_csr guest_csr;

	/* CPU AIA CSR context upon Guest VCPU reset */
	struct kvm_vcpu_aia_csr guest_reset_csr;
};
42 
/* True once the in-kernel AIA irqchip of VM "k" has been initialized */
#define kvm_riscv_aia_initialized(k)	((k)->arch.aia.initialized)

/* True if VM "k" was created with an in-kernel irqchip */
#define irqchip_in_kernel(k)		((k)->arch.aia.in_kernel)

/*
 * Host AIA availability as a static branch: the key is defined out of
 * line and defaults to false, so kvm_riscv_aia_available() compiles to
 * a patched no-op until AIA support is detected and the key is enabled.
 */
DECLARE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
#define kvm_riscv_aia_available() \
	static_branch_unlikely(&kvm_riscv_aia_available)
50 
/* Indirect register index used for IMSIC TOPEI (one past ISELECT_MASK) */
#define KVM_RISCV_AIA_IMSIC_TOPEI	(ISELECT_MASK + 1)

/*
 * Read-modify-write an IMSIC indirect register of a guest VCPU.
 * Stub: always succeeds with 0 and performs no access (no in-kernel
 * IMSIC emulation in this configuration).
 */
static inline int kvm_riscv_vcpu_aia_imsic_rmw(struct kvm_vcpu *vcpu,
					       unsigned long isel,
					       unsigned long *val,
					       unsigned long new_val,
					       unsigned long wr_mask)
{
	return 0;
}
60 
#ifdef CONFIG_32BIT
/*
 * On RV32 the AIA interrupt state spans extra high-half CSRs (see the
 * *h members of struct kvm_vcpu_aia_csr), so it must be explicitly
 * flushed to / synced from hardware; implemented out of line.
 */
void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu);
#else
/* On RV64 there is no separate high half, hence no-op stubs. */
static inline void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu)
{
}
static inline void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
{
}
#endif
/* Check AIA interrupt-pending state of a VCPU against the given mask */
bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);

/* Update the hvip CSR from the VCPU's AIA interrupt state */
void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu);
/* Load/save AIA CSR context when a VCPU is scheduled onto/off a host CPU */
void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu);
/* Get/set one guest AIA CSR identified by reg_num (ONE_REG interface) */
int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
			       unsigned long reg_num,
			       unsigned long *out_val);
int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
			       unsigned long reg_num,
			       unsigned long val);

/*
 * Read-modify-write handlers for the STOPEI CSR and for SIREG indirect
 * register accesses; wired into the CSR dispatch table via
 * KVM_RISCV_VCPU_AIA_CSR_FUNCS below.
 */
int kvm_riscv_vcpu_aia_rmw_topei(struct kvm_vcpu *vcpu,
				 unsigned int csr_num,
				 unsigned long *val,
				 unsigned long new_val,
				 unsigned long wr_mask);
int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
				unsigned long *val, unsigned long new_val,
				unsigned long wr_mask);
/*
 * Dispatch-table entries mapping AIA CSR numbers to their emulation
 * handlers: CSR_SIREG -> rmw_ireg, CSR_STOPEI -> rmw_topei.  Expanded
 * by the common CSR emulation table in the VCPU code.
 */
#define KVM_RISCV_VCPU_AIA_CSR_FUNCS \
{ .base = CSR_SIREG,      .count = 1, .func = kvm_riscv_vcpu_aia_rmw_ireg }, \
{ .base = CSR_STOPEI,     .count = 1, .func = kvm_riscv_vcpu_aia_rmw_topei },
95 
/*
 * Inline stubs for the per-VCPU and per-VM AIA lifecycle hooks; in this
 * configuration there is no in-kernel irqchip work to do, so they are
 * no-ops with trivially-successful return values.
 */
static inline int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu)
{
	/* NOTE(review): non-zero return appears to mean "continue VCPU
	 * run" -- confirm against the caller in vcpu run loop. */
	return 1;
}

static inline void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu)
{
}

static inline int kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu)
{
	/* 0 == success */
	return 0;
}

static inline void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu)
{
}

static inline void kvm_riscv_aia_init_vm(struct kvm *kvm)
{
}

static inline void kvm_riscv_aia_destroy_vm(struct kvm *kvm)
{
}
121 
/* Enable/disable AIA usage on the current host CPU */
void kvm_riscv_aia_enable(void);
void kvm_riscv_aia_disable(void);
/* Module-wide AIA setup/teardown; init returns 0 on success */
int kvm_riscv_aia_init(void);
void kvm_riscv_aia_exit(void);
126 
127 #endif
128