/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#ifndef __RISCV_KVM_HOST_H__
#define __RISCV_KVM_HOST_H__

#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/spinlock.h>
#include <asm/hwcap.h>
#include <asm/kvm_aia.h>
#include <asm/ptrace.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_insn.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_pmu.h>

#define KVM_MAX_VCPUS			1024

#define KVM_HALT_POLL_NS_DEFAULT	500000

#define KVM_VCPU_MAX_FEATURES		0

#define KVM_IRQCHIP_NUM_PINS		1024

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VCPU_RESET		KVM_ARCH_REQ(1)
#define KVM_REQ_UPDATE_HGATP		KVM_ARCH_REQ(2)
#define KVM_REQ_FENCE_I			\
	KVM_ARCH_REQ_FLAGS(3, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE_GVMA_VMID_ALL	KVM_REQ_TLB_FLUSH
#define KVM_REQ_HFENCE_VVMA_ALL		\
	KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE			\
	KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
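/*
 * Illustrative sketch (not part of this header): architecture requests
 * are raised with the generic KVM request API and consumed on the VCPU
 * entry path, e.g.:
 *
 *	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
 *	...
 *	if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
 *		kvm_riscv_reset_vcpu(vcpu);
 *
 * kvm_riscv_reset_vcpu() above is a hypothetical consumer named only
 * for illustration.
 */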

enum kvm_riscv_hfence_type {
	KVM_RISCV_HFENCE_UNKNOWN = 0,
	KVM_RISCV_HFENCE_GVMA_VMID_GPA,
	KVM_RISCV_HFENCE_VVMA_ASID_GVA,
	KVM_RISCV_HFENCE_VVMA_ASID_ALL,
	KVM_RISCV_HFENCE_VVMA_GVA,
};

struct kvm_riscv_hfence {
	enum kvm_riscv_hfence_type type;
	unsigned long asid;
	unsigned long order;
	gpa_t addr;
	gpa_t size;
};

#define KVM_RISCV_VCPU_MAX_HFENCE	64

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 ecall_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 csr_exit_user;
	u64 csr_exit_kernel;
	u64 signal_exits;
	u64 exits;
};

struct kvm_arch_memory_slot {
};

struct kvm_vmid {
	/*
	 * Writes to vmid_version and vmid happen with vmid_lock held
	 * whereas reads happen without any lock held.
	 */
	unsigned long vmid_version;
	unsigned long vmid;
};
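/*
 * Illustrative sketch (assumed reader pattern, not defined in this
 * header): a lockless reader snapshots the fields with READ_ONCE()
 * and re-checks the version against the current global generation,
 * e.g.:
 *
 *	if (READ_ONCE(vmid->vmid_version) != current_version)
 *		kvm_make_request(KVM_REQ_UPDATE_HGATP, vcpu);
 *
 * "current_version" is a placeholder for whatever generation counter
 * the VMID allocator maintains under vmid_lock.
 */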

struct kvm_arch {
	/* G-stage vmid */
	struct kvm_vmid vmid;

	/* G-stage page table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;

	/* Guest Timer */
	struct kvm_guest_timer timer;

	/* AIA Guest/VM context */
	struct kvm_aia aia;
};

struct kvm_cpu_trap {
	unsigned long sepc;
	unsigned long scause;
	unsigned long stval;
	unsigned long htval;
	unsigned long htinst;
};

struct kvm_cpu_context {
	unsigned long zero;
	unsigned long ra;
	unsigned long sp;
	unsigned long gp;
	unsigned long tp;
	unsigned long t0;
	unsigned long t1;
	unsigned long t2;
	unsigned long s0;
	unsigned long s1;
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
	unsigned long a4;
	unsigned long a5;
	unsigned long a6;
	unsigned long a7;
	unsigned long s2;
	unsigned long s3;
	unsigned long s4;
	unsigned long s5;
	unsigned long s6;
	unsigned long s7;
	unsigned long s8;
	unsigned long s9;
	unsigned long s10;
	unsigned long s11;
	unsigned long t3;
	unsigned long t4;
	unsigned long t5;
	unsigned long t6;
	unsigned long sepc;
	unsigned long sstatus;
	unsigned long hstatus;
	union __riscv_fp_state fp;
	struct __riscv_v_ext_state vector;
};

struct kvm_vcpu_csr {
	unsigned long vsstatus;
	unsigned long vsie;
	unsigned long vstvec;
	unsigned long vsscratch;
	unsigned long vsepc;
	unsigned long vscause;
	unsigned long vstval;
	unsigned long hvip;
	unsigned long vsatp;
	unsigned long scounteren;
};

struct kvm_vcpu_arch {
	/* VCPU ran at least once */
	bool ran_atleast_once;

	/* Last Host CPU on which Guest VCPU exited */
	int last_exit_cpu;

	/* ISA feature bits (similar to MISA) */
	DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);

	/* Vendor, Arch, and Implementation details */
	unsigned long mvendorid;
	unsigned long marchid;
	unsigned long mimpid;

	/* SSCRATCH, STVEC, and SCOUNTEREN of Host */
	unsigned long host_sscratch;
	unsigned long host_stvec;
	unsigned long host_scounteren;

	/* CPU context of Host */
	struct kvm_cpu_context host_context;

	/* CPU context of Guest VCPU */
	struct kvm_cpu_context guest_context;

	/* CPU CSR context of Guest VCPU */
	struct kvm_vcpu_csr guest_csr;

	/* CPU context upon Guest VCPU reset */
	struct kvm_cpu_context guest_reset_context;

	/* CPU CSR context upon Guest VCPU reset */
	struct kvm_vcpu_csr guest_reset_csr;

	/*
	 * VCPU interrupts
	 *
	 * We have a lockless approach for tracking pending VCPU interrupts
	 * implemented using atomic bitops. The irqs_pending bitmap represents
	 * pending interrupts whereas irqs_pending_mask represents the bits
	 * changed in irqs_pending. Our approach is modeled on the multiple
	 * producer, single consumer problem where the consumer is the VCPU
	 * itself.
	 */
#define KVM_RISCV_VCPU_NR_IRQS	64
	DECLARE_BITMAP(irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
	DECLARE_BITMAP(irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);
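	/*
	 * Illustrative sketch (assumed producer side, not defined in this
	 * header): a producer marks an interrupt pending and then records
	 * the change so the VCPU (the sole consumer) can fold it into the
	 * HVIP CSR, e.g.:
	 *
	 *	set_bit(irq, vcpu->arch.irqs_pending);
	 *	smp_mb__before_atomic();
	 *	set_bit(irq, vcpu->arch.irqs_pending_mask);
	 *	kvm_vcpu_kick(vcpu);
	 */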

	/* VCPU Timer */
	struct kvm_vcpu_timer timer;

	/* HFENCE request queue */
	spinlock_t hfence_lock;
	unsigned long hfence_head;
	unsigned long hfence_tail;
	struct kvm_riscv_hfence hfence_queue[KVM_RISCV_VCPU_MAX_HFENCE];
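	/*
	 * Illustrative sketch (assumed enqueue pattern, not defined in
	 * this header): producers append fence requests under hfence_lock
	 * and wrap the tail index at KVM_RISCV_VCPU_MAX_HFENCE, e.g.:
	 *
	 *	spin_lock(&vcpu->arch.hfence_lock);
	 *	vcpu->arch.hfence_queue[vcpu->arch.hfence_tail] = *data;
	 *	vcpu->arch.hfence_tail = (vcpu->arch.hfence_tail + 1) %
	 *				 KVM_RISCV_VCPU_MAX_HFENCE;
	 *	spin_unlock(&vcpu->arch.hfence_lock);
	 *
	 * followed by a KVM_REQ_HFENCE request so the VCPU drains the
	 * queue in kvm_riscv_hfence_process().
	 */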

	/* MMIO instruction details */
	struct kvm_mmio_decode mmio_decode;

	/* CSR instruction details */
	struct kvm_csr_decode csr_decode;

	/* SBI context */
	struct kvm_vcpu_sbi_context sbi_context;

	/* AIA VCPU context */
	struct kvm_vcpu_aia aia_context;

	/* Cache pages needed to program page tables with spinlock held */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* VCPU power state */
	struct kvm_mp_state mp_state;
	spinlock_t mp_state_lock;

	/* Don't run the VCPU (blocked) */
	bool pause;

	/* Performance monitoring context */
	struct kvm_pmu pmu_context;
};

static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

#define KVM_ARCH_WANT_MMU_NOTIFIER

#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER		12

void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order);
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order);
void kvm_riscv_local_hfence_gvma_all(void);
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order);
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid);
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order);
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);
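/*
 * Illustrative sketch (assumed call, matching the declarations above):
 * the "order" argument is the log2 granularity of the flush, with
 * KVM_RISCV_GSTAGE_TLB_MIN_ORDER (4 KiB pages) as the smallest step,
 * so flushing one 2 MiB G-stage mapping could look like:
 *
 *	kvm_riscv_local_hfence_gvma_vmid_gpa(vmid, gpa, SZ_2M, PMD_SHIFT);
 */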

void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order);
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid);
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid);
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order);
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask);

int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
			     phys_addr_t hpa, unsigned long size,
			     bool writable, bool in_atomic);
void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
			      unsigned long size);
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
			 struct kvm_memory_slot *memslot,
			 gpa_t gpa, unsigned long hva, bool is_write);
int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
void __init kvm_riscv_gstage_mode_detect(void);
unsigned long __init kvm_riscv_gstage_mode(void);
int kvm_riscv_gstage_gpa_bits(void);

void __init kvm_riscv_gstage_vmid_detect(void);
unsigned long kvm_riscv_gstage_vmid_bits(void);
int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);

int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines);

void __kvm_riscv_unpriv_trap(void);

unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap);
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
				  struct kvm_cpu_trap *trap);
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
			struct kvm_cpu_trap *trap);

void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);

void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu);
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
				    u64 __user *uindices);
int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg);
int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg);

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);
void __kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu);

#endif /* __RISCV_KVM_HOST_H__ */