/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#ifndef __RISCV_KVM_HOST_H__
#define __RISCV_KVM_HOST_H__

#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/spinlock.h>
#include <asm/csr.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_timer.h>

#define KVM_MAX_VCPUS			1024

#define KVM_HALT_POLL_NS_DEFAULT	500000

#define KVM_VCPU_MAX_FEATURES		0

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VCPU_RESET		KVM_ARCH_REQ(1)
#define KVM_REQ_UPDATE_HGATP		KVM_ARCH_REQ(2)
#define KVM_REQ_FENCE_I			\
	KVM_ARCH_REQ_FLAGS(3, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE_GVMA_VMID_ALL	KVM_REQ_TLB_FLUSH
#define KVM_REQ_HFENCE_VVMA_ALL		\
	KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE			\
	KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)

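/*
 * Illustrative sketch (not part of the original header): the KVM_REQ_*
 * values above are raised with the generic kvm_make_request() helper and
 * consumed on the next VCPU entry via kvm_check_request(). The function
 * below is hypothetical and only demonstrates the calling pattern.
 */
static inline void example_request_vcpu_sleep(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_SLEEP, vcpu);	/* mark the request pending */
	kvm_vcpu_kick(vcpu);			/* push the VCPU out of guest mode */
}
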
enum kvm_riscv_hfence_type {
	KVM_RISCV_HFENCE_UNKNOWN = 0,
	KVM_RISCV_HFENCE_GVMA_VMID_GPA,
	KVM_RISCV_HFENCE_VVMA_ASID_GVA,
	KVM_RISCV_HFENCE_VVMA_ASID_ALL,
	KVM_RISCV_HFENCE_VVMA_GVA,
};

struct kvm_riscv_hfence {
	enum kvm_riscv_hfence_type type;
	unsigned long asid;
	unsigned long order;
	gpa_t addr;
	gpa_t size;
};

#define KVM_RISCV_VCPU_MAX_HFENCE	64

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 ecall_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 exits;
};

struct kvm_arch_memory_slot {
};

struct kvm_vmid {
	/*
	 * Writes to vmid_version and vmid happen with vmid_lock held,
	 * whereas reads happen locklessly (see the sketch after this
	 * structure).
	 */
	unsigned long vmid_version;
	unsigned long vmid;
};

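/*
 * Illustrative sketch (not part of the original header): a lockless
 * reader of struct kvm_vmid compares the per-VM generation against a
 * global one with READ_ONCE(), while updates happen under vmid_lock in
 * arch/riscv/kvm/vmid.c. The parameter global_vmid_version is a
 * hypothetical stand-in for that file's internal counter.
 */
static inline bool example_vmid_stale(struct kvm_vmid *vmid,
				      unsigned long global_vmid_version)
{
	/* Lockless read; pairs with the locked writer in vmid.c. */
	return READ_ONCE(vmid->vmid_version) != global_vmid_version;
}
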
struct kvm_arch {
	/* G-stage vmid */
	struct kvm_vmid vmid;

	/* G-stage page table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;

	/* Guest Timer */
	struct kvm_guest_timer timer;
};

struct kvm_mmio_decode {
	unsigned long insn;
	int insn_len;
	int len;
	int shift;
	int return_handled;
};

struct kvm_sbi_context {
	int return_handled;
};

struct kvm_cpu_trap {
	unsigned long sepc;
	unsigned long scause;
	unsigned long stval;
	unsigned long htval;
	unsigned long htinst;
};

struct kvm_cpu_context {
	unsigned long zero;
	unsigned long ra;
	unsigned long sp;
	unsigned long gp;
	unsigned long tp;
	unsigned long t0;
	unsigned long t1;
	unsigned long t2;
	unsigned long s0;
	unsigned long s1;
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
	unsigned long a4;
	unsigned long a5;
	unsigned long a6;
	unsigned long a7;
	unsigned long s2;
	unsigned long s3;
	unsigned long s4;
	unsigned long s5;
	unsigned long s6;
	unsigned long s7;
	unsigned long s8;
	unsigned long s9;
	unsigned long s10;
	unsigned long s11;
	unsigned long t3;
	unsigned long t4;
	unsigned long t5;
	unsigned long t6;
	unsigned long sepc;
	unsigned long sstatus;
	unsigned long hstatus;
	union __riscv_fp_state fp;
};

struct kvm_vcpu_csr {
	unsigned long vsstatus;
	unsigned long vsie;
	unsigned long vstvec;
	unsigned long vsscratch;
	unsigned long vsepc;
	unsigned long vscause;
	unsigned long vstval;
	unsigned long hvip;
	unsigned long vsatp;
	unsigned long scounteren;
};

struct kvm_vcpu_arch {
	/* VCPU ran at least once */
	bool ran_atleast_once;

	/* Last Host CPU on which Guest VCPU exited */
	int last_exit_cpu;

	/* ISA feature bits (similar to MISA) */
	unsigned long isa;

	/* SSCRATCH, STVEC, and SCOUNTEREN of Host */
	unsigned long host_sscratch;
	unsigned long host_stvec;
	unsigned long host_scounteren;

	/* CPU context of Host */
	struct kvm_cpu_context host_context;

	/* CPU context of Guest VCPU */
	struct kvm_cpu_context guest_context;

	/* CPU CSR context of Guest VCPU */
	struct kvm_vcpu_csr guest_csr;

	/* CPU context upon Guest VCPU reset */
	struct kvm_cpu_context guest_reset_context;

	/* CPU CSR context upon Guest VCPU reset */
	struct kvm_vcpu_csr guest_reset_csr;

	/*
	 * VCPU interrupts
	 *
	 * We take a lockless approach to tracking pending VCPU interrupts,
	 * implemented with atomic bitops. The irqs_pending bitmap represents
	 * pending interrupts, whereas irqs_pending_mask represents the bits
	 * that changed in irqs_pending. The approach is modeled on a
	 * multiple-producer, single-consumer problem where the consumer is
	 * the VCPU itself (see the sketch after this structure).
	 */
	unsigned long irqs_pending;
	unsigned long irqs_pending_mask;

	/* VCPU Timer */
	struct kvm_vcpu_timer timer;

	/* HFENCE request queue (a bounded ring; see sketch after this struct) */
	spinlock_t hfence_lock;
	unsigned long hfence_head;
	unsigned long hfence_tail;
	struct kvm_riscv_hfence hfence_queue[KVM_RISCV_VCPU_MAX_HFENCE];

	/* MMIO instruction details */
	struct kvm_mmio_decode mmio_decode;

	/* SBI context */
	struct kvm_sbi_context sbi_context;

	/*
	 * Cache of pages needed to program page tables with a spinlock
	 * held (see the sketch below).
	 */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* VCPU power-off state */
	bool power_off;

	/* Don't run the VCPU (blocked) */
	bool pause;
};
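
/*
 * Illustrative sketch (not part of the original header): a producer
 * raises an interrupt by setting the bit in irqs_pending and then
 * publishing the change through irqs_pending_mask, mirroring
 * kvm_riscv_vcpu_set_interrupt() declared below. The helper name is
 * hypothetical; the atomic bitop ordering is the point.
 */
static inline void example_raise_irq(struct kvm_vcpu *vcpu, unsigned int irq)
{
	/* Record the pending interrupt ... */
	set_bit(irq, &vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	/* ... then tell the consumer (the VCPU itself) which bit changed. */
	set_bit(irq, &vcpu->arch.irqs_pending_mask);
	/* Make sure a running VCPU notices the update. */
	kvm_vcpu_kick(vcpu);
}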
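
/*
 * Illustrative sketch (not part of the original header): the hfence
 * fields above form a bounded ring buffer guarded by hfence_lock. This
 * hypothetical enqueue helper shows the head/tail arithmetic; the real
 * producers and consumer live in arch/riscv/kvm/tlb.c.
 */
static inline bool example_hfence_enqueue(struct kvm_vcpu *vcpu,
					  const struct kvm_riscv_hfence *data)
{
	struct kvm_vcpu_arch *varch = &vcpu->arch;
	unsigned long next;
	bool queued = false;

	spin_lock(&varch->hfence_lock);
	next = (varch->hfence_tail + 1) % KVM_RISCV_VCPU_MAX_HFENCE;
	if (next != varch->hfence_head) {	/* queue not full */
		varch->hfence_queue[varch->hfence_tail] = *data;
		varch->hfence_tail = next;
		queued = true;
	}
	spin_unlock(&varch->hfence_lock);

	return queued;	/* caller raises KVM_REQ_HFENCE on success */
}
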
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

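/*
 * Illustrative sketch (not part of the original header): mmu_page_cache
 * in struct kvm_vcpu_arch is topped up in sleepable context with the
 * generic KVM helper so that page-table pages can later be taken while
 * a spinlock is held. The function name and the minimum of 4 objects
 * are hypothetical; the real code sizes the cache to the page-table
 * depth.
 */
static inline int example_topup_gstage_cache(struct kvm_vcpu *vcpu)
{
	/* May sleep, so call this before entering atomic context. */
	return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_cache, 4);
}
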
#define KVM_ARCH_WANT_MMU_NOTIFIER

#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER		12

void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order);
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order);
void kvm_riscv_local_hfence_gvma_all(void);
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order);
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid);
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order);
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);

void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order);
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid);
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid);
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order);
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask);

int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
			 struct kvm_memory_slot *memslot,
			 gpa_t gpa, unsigned long hva, bool is_write);
int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
void kvm_riscv_gstage_mode_detect(void);
unsigned long kvm_riscv_gstage_mode(void);
int kvm_riscv_gstage_gpa_bits(void);

void kvm_riscv_gstage_vmid_detect(void);
unsigned long kvm_riscv_gstage_vmid_bits(void);
int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);

void __kvm_riscv_unpriv_trap(void);

void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu);
unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap);
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
				  struct kvm_cpu_trap *trap);
int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
			struct kvm_cpu_trap *trap);

void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask);
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);

int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);

#endif /* __RISCV_KVM_HOST_H__ */