/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#ifndef __RISCV_KVM_HOST_H__
#define __RISCV_KVM_HOST_H__

#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/spinlock.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_insn.h>
#include <asm/kvm_vcpu_timer.h>

#define KVM_MAX_VCPUS			1024

#define KVM_HALT_POLL_NS_DEFAULT	500000

#define KVM_VCPU_MAX_FEATURES		0

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VCPU_RESET		KVM_ARCH_REQ(1)
#define KVM_REQ_UPDATE_HGATP		KVM_ARCH_REQ(2)
#define KVM_REQ_FENCE_I \
	KVM_ARCH_REQ_FLAGS(3, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE_GVMA_VMID_ALL	KVM_REQ_TLB_FLUSH
#define KVM_REQ_HFENCE_VVMA_ALL \
	KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE \
	KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
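
/*
 * Illustrative sketch only (not part of this header): raising one of
 * the requests above on a VCPU and consuming it near the run loop is
 * typically done with the generic KVM request helpers. The function
 * names below are hypothetical.
 *
 *	static void example_request_fence_i(struct kvm_vcpu *vcpu)
 *	{
 *		kvm_make_request(KVM_REQ_FENCE_I, vcpu);
 *		kvm_vcpu_kick(vcpu);
 *	}
 *
 *	static void example_handle_requests(struct kvm_vcpu *vcpu)
 *	{
 *		if (kvm_check_request(KVM_REQ_FENCE_I, vcpu))
 *			kvm_riscv_fence_i_process(vcpu);
 *	}
 */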

enum kvm_riscv_hfence_type {
	KVM_RISCV_HFENCE_UNKNOWN = 0,
	KVM_RISCV_HFENCE_GVMA_VMID_GPA,
	KVM_RISCV_HFENCE_VVMA_ASID_GVA,
	KVM_RISCV_HFENCE_VVMA_ASID_ALL,
	KVM_RISCV_HFENCE_VVMA_GVA,
};

struct kvm_riscv_hfence {
	enum kvm_riscv_hfence_type type;
	unsigned long asid;
	unsigned long order;
	gpa_t addr;
	gpa_t size;
};

#define KVM_RISCV_VCPU_MAX_HFENCE	64

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 ecall_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 csr_exit_user;
	u64 csr_exit_kernel;
	u64 signal_exits;
	u64 exits;
};

struct kvm_arch_memory_slot {
};

struct kvm_vmid {
	/*
	 * Writes to vmid_version and vmid happen with vmid_lock held
	 * whereas reads happen without any lock held.
	 */
	unsigned long vmid_version;
	unsigned long vmid;
};
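
/*
 * Illustrative sketch only: a plausible lockless reader-side staleness
 * check against a VMID generation counter, matching the locking comment
 * in struct kvm_vmid above. The current_version parameter stands in for
 * global state this header does not define.
 *
 *	static bool example_vmid_stale(struct kvm_vmid *vmid,
 *				       unsigned long current_version)
 *	{
 *		return READ_ONCE(vmid->vmid_version) != current_version;
 *	}
 */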

struct kvm_arch {
	/* G-stage vmid */
	struct kvm_vmid vmid;

	/* G-stage page table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;

	/* Guest Timer */
	struct kvm_guest_timer timer;
};

struct kvm_sbi_context {
	int return_handled;
};

struct kvm_cpu_trap {
	unsigned long sepc;
	unsigned long scause;
	unsigned long stval;
	unsigned long htval;
	unsigned long htinst;
};

struct kvm_cpu_context {
	unsigned long zero;
	unsigned long ra;
	unsigned long sp;
	unsigned long gp;
	unsigned long tp;
	unsigned long t0;
	unsigned long t1;
	unsigned long t2;
	unsigned long s0;
	unsigned long s1;
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
	unsigned long a4;
	unsigned long a5;
	unsigned long a6;
	unsigned long a7;
	unsigned long s2;
	unsigned long s3;
	unsigned long s4;
	unsigned long s5;
	unsigned long s6;
	unsigned long s7;
	unsigned long s8;
	unsigned long s9;
	unsigned long s10;
	unsigned long s11;
	unsigned long t3;
	unsigned long t4;
	unsigned long t5;
	unsigned long t6;
	unsigned long sepc;
	unsigned long sstatus;
	unsigned long hstatus;
	union __riscv_fp_state fp;
};

struct kvm_vcpu_csr {
	unsigned long vsstatus;
	unsigned long vsie;
	unsigned long vstvec;
	unsigned long vsscratch;
	unsigned long vsepc;
	unsigned long vscause;
	unsigned long vstval;
	unsigned long hvip;
	unsigned long vsatp;
	unsigned long scounteren;
};

struct kvm_vcpu_arch {
	/* VCPU ran at least once */
	bool ran_atleast_once;

	/* Last Host CPU on which Guest VCPU exited */
	int last_exit_cpu;

	/* ISA feature bits (similar to MISA) */
	DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);

	/* SSCRATCH, STVEC, and SCOUNTEREN of Host */
	unsigned long host_sscratch;
	unsigned long host_stvec;
	unsigned long host_scounteren;

	/* CPU context of Host */
	struct kvm_cpu_context host_context;

	/* CPU context of Guest VCPU */
	struct kvm_cpu_context guest_context;

	/* CPU CSR context of Guest VCPU */
	struct kvm_vcpu_csr guest_csr;

	/* CPU context upon Guest VCPU reset */
	struct kvm_cpu_context guest_reset_context;

	/* CPU CSR context upon Guest VCPU reset */
	struct kvm_vcpu_csr guest_reset_csr;

	/*
	 * VCPU interrupts
	 *
	 * We use a lockless approach for tracking pending VCPU interrupts,
	 * implemented using atomic bitops. The irqs_pending bitmap represents
	 * pending interrupts, whereas irqs_pending_mask represents the bits
	 * that changed in irqs_pending. The approach is modeled on the
	 * multiple-producer, single-consumer problem, where the consumer is
	 * the VCPU itself.
	 */
	unsigned long irqs_pending;
	unsigned long irqs_pending_mask;

	/* VCPU Timer */
	struct kvm_vcpu_timer timer;

	/* HFENCE request queue */
	spinlock_t hfence_lock;
	unsigned long hfence_head;
	unsigned long hfence_tail;
	struct kvm_riscv_hfence hfence_queue[KVM_RISCV_VCPU_MAX_HFENCE];

	/* MMIO instruction details */
	struct kvm_mmio_decode mmio_decode;

	/* CSR instruction details */
	struct kvm_csr_decode csr_decode;

	/* SBI context */
	struct kvm_sbi_context sbi_context;

	/* Cache of pages needed to program page tables with spinlock held */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* VCPU power-off state */
	bool power_off;

	/* Don't run the VCPU (blocked) */
	bool pause;
};
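
/*
 * Illustrative sketch only: one way a producer could publish a pending
 * interrupt through the lockless irqs_pending/irqs_pending_mask pair
 * described above and then kick the VCPU (the single consumer). The
 * exact ordering primitives used by the implementation may differ.
 *
 *	static void example_set_pending(struct kvm_vcpu *vcpu,
 *					unsigned int irq)
 *	{
 *		set_bit(irq, &vcpu->arch.irqs_pending);
 *		smp_mb__before_atomic();
 *		set_bit(irq, &vcpu->arch.irqs_pending_mask);
 *		kvm_vcpu_kick(vcpu);
 *	}
 */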
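
/*
 * Illustrative sketch only: a possible producer-side enqueue into the
 * fixed-size hfence_queue ring above, serialized by hfence_lock, with
 * hfence_head as the dequeue index and hfence_tail as the enqueue
 * index. It assumes an empty slot is marked KVM_RISCV_HFENCE_UNKNOWN;
 * the helper name and full-queue policy are assumptions, not the
 * implementation.
 *
 *	static bool example_hfence_enqueue(struct kvm_vcpu *vcpu,
 *					   const struct kvm_riscv_hfence *d)
 *	{
 *		bool queued = false;
 *
 *		spin_lock(&vcpu->arch.hfence_lock);
 *		if (vcpu->arch.hfence_queue[vcpu->arch.hfence_tail].type ==
 *		    KVM_RISCV_HFENCE_UNKNOWN) {
 *			vcpu->arch.hfence_queue[vcpu->arch.hfence_tail] = *d;
 *			vcpu->arch.hfence_tail =
 *				(vcpu->arch.hfence_tail + 1) %
 *				KVM_RISCV_VCPU_MAX_HFENCE;
 *			queued = true;
 *		}
 *		spin_unlock(&vcpu->arch.hfence_lock);
 *		return queued;
 *	}
 */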

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

#define KVM_ARCH_WANT_MMU_NOTIFIER

#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER	12

void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order);
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order);
void kvm_riscv_local_hfence_gvma_all(void);
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order);
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid);
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order);
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);

void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order);
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid);
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid);
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order);
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask);

int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
			     phys_addr_t hpa, unsigned long size,
			     bool writable, bool in_atomic);
void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
			      unsigned long size);
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
			 struct kvm_memory_slot *memslot,
			 gpa_t gpa, unsigned long hva, bool is_write);
int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
void kvm_riscv_gstage_mode_detect(void);
unsigned long kvm_riscv_gstage_mode(void);
int kvm_riscv_gstage_gpa_bits(void);

void kvm_riscv_gstage_vmid_detect(void);
unsigned long kvm_riscv_gstage_vmid_bits(void);
int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);

void __kvm_riscv_unpriv_trap(void);

unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap);
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
				  struct kvm_cpu_trap *trap);
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
			struct kvm_cpu_trap *trap);

void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask);
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);

int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);

#endif /* __RISCV_KVM_HOST_H__ */