/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#ifndef __MIPS_KVM_HOST_H__
#define __MIPS_KVM_HOST_H__

#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/threads.h>
#include <linux/spinlock.h>

#include <asm/inst.h>
#include <asm/mipsregs.h>

#include <kvm/iodev.h>

/* MIPS KVM register ids */
#define MIPS_CP0_32(_R, _S) \
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S) \
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))

#define KVM_REG_MIPS_CP0_INDEX		MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1	MIPS_CP0_64(3, 0)
#define KVM_REG_MIPS_CP0_CONTEXT	MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_CONTEXTCONFIG	MIPS_CP0_32(4, 1)
#define KVM_REG_MIPS_CP0_USERLOCAL	MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_XCONTEXTCONFIG	MIPS_CP0_64(4, 3)
#define KVM_REG_MIPS_CP0_PAGEMASK	MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN	MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_SEGCTL0	MIPS_CP0_64(5, 2)
#define KVM_REG_MIPS_CP0_SEGCTL1	MIPS_CP0_64(5, 3)
#define KVM_REG_MIPS_CP0_SEGCTL2	MIPS_CP0_64(5, 4)
#define KVM_REG_MIPS_CP0_PWBASE		MIPS_CP0_64(5, 5)
#define KVM_REG_MIPS_CP0_PWFIELD	MIPS_CP0_64(5, 6)
#define KVM_REG_MIPS_CP0_PWSIZE		MIPS_CP0_64(5, 7)
#define KVM_REG_MIPS_CP0_WIRED		MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_PWCTL		MIPS_CP0_32(6, 6)
#define KVM_REG_MIPS_CP0_HWRENA		MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR	MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_BADINSTR	MIPS_CP0_32(8, 1)
#define KVM_REG_MIPS_CP0_BADINSTRP	MIPS_CP0_32(8, 2)
#define KVM_REG_MIPS_CP0_COUNT		MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI	MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE	MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS		MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_INTCTL		MIPS_CP0_32(12, 1)
#define KVM_REG_MIPS_CP0_CAUSE		MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC		MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID		MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE		MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG		MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1	MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2	MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3	MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4	MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5	MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG6	MIPS_CP0_32(16, 6)
#define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_MAARI		MIPS_CP0_64(17, 2)
#define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_DIAG		MIPS_CP0_32(22, 0)
#define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)
#define KVM_REG_MIPS_CP0_KSCRATCH1	MIPS_CP0_64(31, 2)
#define KVM_REG_MIPS_CP0_KSCRATCH2	MIPS_CP0_64(31, 3)
#define KVM_REG_MIPS_CP0_KSCRATCH3	MIPS_CP0_64(31, 4)
#define KVM_REG_MIPS_CP0_KSCRATCH4	MIPS_CP0_64(31, 5)
#define KVM_REG_MIPS_CP0_KSCRATCH5	MIPS_CP0_64(31, 6)
#define KVM_REG_MIPS_CP0_KSCRATCH6	MIPS_CP0_64(31, 7)
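/*
 * Example (illustrative only, not part of this header): userspace can read a
 * guest CP0 register through the ids built above, e.g. KVM_REG_MIPS_CP0_STATUS
 * == MIPS_CP0_32(12, 0), via the KVM_GET_ONE_REG ioctl. "vcpu_fd" here is an
 * assumed open vcpu file descriptor:
 *
 *	u64 val;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_MIPS_CP0_STATUS,
 *		.addr = (unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */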
#define KVM_MAX_VCPUS		16
#define KVM_USER_MEM_SLOTS	16
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS	0

#define KVM_HALT_POLL_NS_DEFAULT 500000

#ifdef CONFIG_KVM_MIPS_VZ
extern unsigned long GUESTID_MASK;
extern unsigned long GUESTID_FIRST_VERSION;
extern unsigned long GUESTID_VERSION_MASK;
#endif

/*
 * Special address that contains the comm page, used for reducing # of traps.
 * This needs to be within 32KB of 0x0 (so the zero register can be used), but
 * preferably not at 0x0 so that most kernel NULL pointer dereferences can be
 * caught.
 */
#define KVM_GUEST_COMMPAGE_ADDR		((PAGE_SIZE > 0x8000) ? 0 : \
					 (0x8000 - PAGE_SIZE))

#define KVM_GUEST_KERNEL_MODE(vcpu)	((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
					((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))

#define KVM_GUEST_KUSEG			0x00000000UL
#define KVM_GUEST_KSEG0			0x40000000UL
#define KVM_GUEST_KSEG1			0x40000000UL
#define KVM_GUEST_KSEG23		0x60000000UL
#define KVM_GUEST_KSEGX(a)		((_ACAST32_(a)) & 0xe0000000)
#define KVM_GUEST_CPHYSADDR(a)		((_ACAST32_(a)) & 0x1fffffff)

#define KVM_GUEST_CKSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
#define KVM_GUEST_CKSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
#define KVM_GUEST_CKSEG23ADDR(a)	(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)

/*
 * Map an address to a certain kernel segment
 */
#define KVM_GUEST_KSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
#define KVM_GUEST_KSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
#define KVM_GUEST_KSEG23ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)

#define KVM_INVALID_PAGE		0xdeadbeef
#define KVM_INVALID_ADDR		0xdeadbeef
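/*
 * Example (illustrative): KVM_GUEST_KSEG0ADDR() strips an address down to its
 * guest physical bits and rebases it into the guest KSEG0 window, e.g.
 *
 *	KVM_GUEST_CPHYSADDR(0x80001000) == 0x00001000
 *	KVM_GUEST_KSEG0ADDR(0x80001000) == 0x40001000
 */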
/*
 * EVA has overlapping user & kernel address spaces, so user VAs may be >
 * PAGE_OFFSET. For this reason we can't use the default KVM_HVA_ERR_BAD of
 * PAGE_OFFSET.
 */
#define KVM_HVA_ERR_BAD			(-1UL)
#define KVM_HVA_ERR_RO_BAD		(-2UL)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return IS_ERR_VALUE(addr);
}

struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u64 wait_exits;
	u64 cache_exits;
	u64 signal_exits;
	u64 int_exits;
	u64 cop_unusable_exits;
	u64 tlbmod_exits;
	u64 tlbmiss_ld_exits;
	u64 tlbmiss_st_exits;
	u64 addrerr_st_exits;
	u64 addrerr_ld_exits;
	u64 syscall_exits;
	u64 resvd_inst_exits;
	u64 break_inst_exits;
	u64 trap_inst_exits;
	u64 msa_fpe_exits;
	u64 fpe_exits;
	u64 msa_disabled_exits;
	u64 flush_dcache_exits;
#ifdef CONFIG_KVM_MIPS_VZ
	u64 vz_gpsi_exits;
	u64 vz_gsfc_exits;
	u64 vz_hc_exits;
	u64 vz_grr_exits;
	u64 vz_gva_exits;
	u64 vz_ghfc_exits;
	u64 vz_gpa_exits;
	u64 vz_resvd_exits;
#ifdef CONFIG_CPU_LOONGSON64
	u64 vz_cpucfg_exits;
#endif
#endif
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_success_ns;
	u64 halt_poll_fail_ns;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
};

struct kvm_arch_memory_slot {
};

#ifdef CONFIG_CPU_LOONGSON64
struct ipi_state {
	uint32_t status;
	uint32_t en;
	uint32_t set;
	uint32_t clear;
	uint64_t buf[4];
};

struct loongson_kvm_ipi;

struct ipi_io_device {
	int node_id;
	struct loongson_kvm_ipi *ipi;
	struct kvm_io_device device;
};

struct loongson_kvm_ipi {
	spinlock_t lock;
	struct kvm *kvm;
	struct ipi_state ipistate[16];
	struct ipi_io_device dev_ipi[4];
};
#endif

struct kvm_arch {
	/* Guest physical mm */
	struct mm_struct gpa_mm;
	/* Mask of CPUs needing GPA ASID flush */
	cpumask_t asid_flush_mask;
#ifdef CONFIG_CPU_LOONGSON64
	struct loongson_kvm_ipi ipi;
#endif
};

#define N_MIPS_COPROC_REGS	32
#define N_MIPS_COPROC_SEL	8

struct mips_coproc {
	unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
	unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#endif
};

/*
 * Coprocessor 0 register names
 */
#define MIPS_CP0_TLB_INDEX	0
#define MIPS_CP0_TLB_RANDOM	1
#define MIPS_CP0_TLB_LOW	2
#define MIPS_CP0_TLB_LO0	2
#define MIPS_CP0_TLB_LO1	3
#define MIPS_CP0_TLB_CONTEXT	4
#define MIPS_CP0_TLB_PG_MASK	5
#define MIPS_CP0_TLB_WIRED	6
#define MIPS_CP0_HWRENA		7
#define MIPS_CP0_BAD_VADDR	8
#define MIPS_CP0_COUNT		9
#define MIPS_CP0_TLB_HI		10
#define MIPS_CP0_COMPARE	11
#define MIPS_CP0_STATUS		12
#define MIPS_CP0_CAUSE		13
#define MIPS_CP0_EXC_PC		14
#define MIPS_CP0_PRID		15
#define MIPS_CP0_CONFIG		16
#define MIPS_CP0_LLADDR		17
#define MIPS_CP0_WATCH_LO	18
#define MIPS_CP0_WATCH_HI	19
#define MIPS_CP0_TLB_XCONTEXT	20
#define MIPS_CP0_DIAG		22
#define MIPS_CP0_ECC		26
#define MIPS_CP0_CACHE_ERR	27
#define MIPS_CP0_TAG_LO		28
#define MIPS_CP0_TAG_HI		29
#define MIPS_CP0_ERROR_PC	30
#define MIPS_CP0_DEBUG		23
#define MIPS_CP0_DEPC		24
#define MIPS_CP0_PERFCNT	25
#define MIPS_CP0_ERRCTL		26
#define MIPS_CP0_DATA_LO	28
#define MIPS_CP0_DATA_HI	29
#define MIPS_CP0_DESAVE		31
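/*
 * Example (illustrative): the saved guest CP0 context is indexed by
 * (register number, select), so the software copy of guest CP0_Status
 * (register 12, select 0) lives at:
 *
 *	cop0->reg[MIPS_CP0_STATUS][0]
 *
 * The kvm_read/write_sw_gc0_*() accessors generated further down wrap
 * exactly this indexing.
 */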
#define MIPS_CP0_CONFIG_SEL	0
#define MIPS_CP0_CONFIG1_SEL	1
#define MIPS_CP0_CONFIG2_SEL	2
#define MIPS_CP0_CONFIG3_SEL	3
#define MIPS_CP0_CONFIG4_SEL	4
#define MIPS_CP0_CONFIG5_SEL	5

#define MIPS_CP0_GUESTCTL2	10
#define MIPS_CP0_GUESTCTL2_SEL	5
#define MIPS_CP0_GTOFFSET	12
#define MIPS_CP0_GTOFFSET_SEL	7

/* Resume Flags */
#define RESUME_FLAG_DR		(1<<0)	/* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */

#define RESUME_GUEST		0
#define RESUME_GUEST_DR		RESUME_FLAG_DR
#define RESUME_HOST		RESUME_FLAG_HOST

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_WAIT,		/* WAIT instruction */
	EMULATE_PRIV_FAIL,
	EMULATE_EXCEPT,		/* A guest exception has been generated */
	EMULATE_HYPERCALL,	/* HYPCALL instruction */
};

#define mips3_paddr_to_tlbpfn(x) \
	(((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
#define mips3_tlbpfn_to_paddr(x) \
	((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)

#define MIPS3_PG_SHIFT		6
#define MIPS3_PG_FRAME		0x3fffffc0

#if defined(CONFIG_64BIT)
#define VPN2_MASK		GENMASK(cpu_vmbits - 1, 13)
#else
#define VPN2_MASK		0xffffe000
#endif
#define KVM_ENTRYHI_ASID	cpu_asid_mask(&boot_cpu_data)
#define TLB_IS_GLOBAL(x)	((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
#define TLB_VPN2(x)		((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x)		((x).tlb_hi & KVM_ENTRYHI_ASID)
#define TLB_LO_IDX(x, va)	(((va) >> PAGE_SHIFT) & 1)
#define TLB_IS_VALID(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V)
#define TLB_IS_DIRTY(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_D)
#define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) == \
				 ((y) & VPN2_MASK & ~(x).tlb_mask))
#define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) || \
				 TLB_ASID(x) == ((y) & KVM_ENTRYHI_ASID))

struct kvm_mips_tlb {
	long tlb_mask;
	long tlb_hi;
	long tlb_lo[2];
};

#define KVM_NR_MEM_OBJS 4
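/*
 * Example (illustrative): the helpers above are typically combined to match
 * an EntryHi-style value (VPN2 | ASID) against a software TLB entry, much as
 * kvm_mips_guest_tlb_lookup() (declared below) does:
 *
 *	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
 *		struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[i];
 *
 *		if (TLB_HI_VPN2_HIT(*tlb, entryhi) &&
 *		    TLB_HI_ASID_HIT(*tlb, entryhi))
 *			return i;	// matching entry
 *	}
 */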
/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};

#define KVM_MIPS_AUX_FPU	0x1
#define KVM_MIPS_AUX_MSA	0x2

#define KVM_MIPS_GUEST_TLB_SIZE	64
struct kvm_vcpu_arch {
	void *guest_ebase;
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);

	/* Host registers preserved across guest mode execution */
	unsigned long host_stack;
	unsigned long host_gp;
	unsigned long host_pgd;
	unsigned long host_entryhi;

	/* Host CP0 registers used when handling exits from guest */
	unsigned long host_cp0_badvaddr;
	unsigned long host_cp0_epc;
	u32 host_cp0_cause;
	u32 host_cp0_guestctl0;
	u32 host_cp0_badinstr;
	u32 host_cp0_badinstrp;

	/* GPRS */
	unsigned long gprs[32];
	unsigned long hi;
	unsigned long lo;
	unsigned long pc;

	/* FPU State */
	struct mips_fpu_struct fpu;
	/* Which auxiliary state is loaded (KVM_MIPS_AUX_*) */
	unsigned int aux_inuse;

	/* COP0 State */
	struct mips_coproc *cop0;

	/* Host KSEG0 address of the EI/DI offset */
	void *kseg0_commpage;

	/* Resume PC after MMIO completion */
	unsigned long io_pc;
	/* GPR used as IO source/target */
	u32 io_gpr;

	struct hrtimer comparecount_timer;
	/* Count timer control KVM register */
	u32 count_ctl;
	/* Count bias from the raw time */
	u32 count_bias;
	/* Frequency of timer in Hz */
	u32 count_hz;
	/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
	s64 count_dyn_bias;
	/* Resume time */
	ktime_t count_resume;
	/* Period of timer tick in ns */
	u64 count_period;

	/* Bitmask of exceptions that are pending */
	unsigned long pending_exceptions;

	/* Bitmask of pending exceptions to be cleared */
	unsigned long pending_exceptions_clr;

	/* S/W Based TLB for guest */
	struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];

	/* Guest kernel/user [partial] mm */
	struct mm_struct guest_kernel_mm, guest_user_mm;

	/* Guest ASID of last user mode execution */
	unsigned int last_user_gasid;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

#ifdef CONFIG_KVM_MIPS_VZ
	/* vcpu's vzguestid is different on each host cpu in an smp system */
	u32 vzguestid[NR_CPUS];

	/* wired guest TLB entries */
	struct kvm_mips_tlb *wired_tlb;
	unsigned int wired_tlb_limit;
	unsigned int wired_tlb_used;

	/* emulated guest MAAR registers */
	unsigned long maar[6];
#endif

	/* Last CPU the VCPU state was loaded on */
	int last_sched_cpu;
	/* Last CPU the VCPU actually executed guest code on */
	int last_exec_cpu;

	/* WAIT executed */
	int wait;

	u8 fpu_enabled;
	u8 msa_enabled;
};

static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
						unsigned long val)
{
	unsigned long temp;

	do {
		__asm__ __volatile__(
		"	.set	push			\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"	\n"
		"	" __LL "%0, %1			\n"
		"	or	%0, %2			\n"
		"	" __SC	"%0, %1			\n"
		"	.set	pop			\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (val));
	} while (unlikely(!temp));
}
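/*
 * Note: the LL/SC loop above (and in the two helpers below) implements an
 * atomic read-modify-write of the saved register: the store-conditional
 * leaves zero in %0 if another CPU intervened, in which case the sequence is
 * retried. _kvm_atomic_set_c0_guest_reg(reg, val) is therefore equivalent to
 * an atomic "*reg |= val".
 */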
static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
						  unsigned long val)
{
	unsigned long temp;

	do {
		__asm__ __volatile__(
		"	.set	push			\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"	\n"
		"	" __LL "%0, %1			\n"
		"	and	%0, %2			\n"
		"	" __SC	"%0, %1			\n"
		"	.set	pop			\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~val));
	} while (unlikely(!temp));
}

static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
						   unsigned long change,
						   unsigned long val)
{
	unsigned long temp;

	do {
		__asm__ __volatile__(
		"	.set	push			\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"	\n"
		"	" __LL "%0, %1			\n"
		"	and	%0, %2			\n"
		"	or	%0, %3			\n"
		"	" __SC	"%0, %1			\n"
		"	.set	pop			\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~change), "r" (val & change));
	} while (unlikely(!temp));
}

/* Guest register types, used in accessor build below */
#define __KVMT32	u32
#define __KVMTl		unsigned long

/*
 * __BUILD_KVM_$ops_SAVED(): kvm_$op_sw_gc0_$reg()
 * These operate on the saved guest C0 state in RAM.
 */

/* Generate saved context simple accessors */
#define __BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
static inline __KVMT##type kvm_read_sw_gc0_##name(struct mips_coproc *cop0) \
{									\
	return cop0->reg[(_reg)][(sel)];				\
}									\
static inline void kvm_write_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] = val;					\
}

/* Generate saved context bitwise modifiers */
#define __BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] |= val;				\
}									\
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] &= ~val;				\
}									\
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	unsigned long _mask = mask;					\
									\
	cop0->reg[(_reg)][(sel)] &= ~_mask;				\
	cop0->reg[(_reg)][(sel)] |= val & _mask;			\
}

/* Generate saved context atomic bitwise modifiers */
#define __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel)			\
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	_kvm_atomic_set_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val);	\
}									\
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	_kvm_atomic_clear_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val);	\
}									\
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	_kvm_atomic_change_c0_guest_reg(&cop0->reg[(_reg)][(sel)], mask, \
					val);				\
}
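/*
 * Example (illustrative): __BUILD_KVM_RW_SAVED(status, 32, MIPS_CP0_STATUS, 0)
 * expands to roughly:
 *
 *	static inline u32 kvm_read_sw_gc0_status(struct mips_coproc *cop0)
 *	{
 *		return cop0->reg[MIPS_CP0_STATUS][0];
 *	}
 *	static inline void kvm_write_sw_gc0_status(struct mips_coproc *cop0,
 *						   u32 val)
 *	{
 *		cop0->reg[MIPS_CP0_STATUS][0] = val;
 *	}
 */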
/*
 * __BUILD_KVM_$ops_VZ(): kvm_$op_vz_gc0_$reg()
 * These operate on the VZ guest C0 context in hardware.
 */

/* Generate VZ guest context simple accessors */
#define __BUILD_KVM_RW_VZ(name, type, _reg, sel)			\
static inline __KVMT##type kvm_read_vz_gc0_##name(struct mips_coproc *cop0) \
{									\
	return read_gc0_##name();					\
}									\
static inline void kvm_write_vz_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	write_gc0_##name(val);						\
}

/* Generate VZ guest context bitwise modifiers */
#define __BUILD_KVM_SET_VZ(name, type, _reg, sel)			\
static inline void kvm_set_vz_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	set_gc0_##name(val);						\
}									\
static inline void kvm_clear_vz_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	clear_gc0_##name(val);						\
}									\
static inline void kvm_change_vz_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	change_gc0_##name(mask, val);					\
}

/* Generate VZ guest context save/restore to/from saved context */
#define __BUILD_KVM_SAVE_VZ(name, _reg, sel)				\
static inline void kvm_restore_gc0_##name(struct mips_coproc *cop0)	\
{									\
	write_gc0_##name(cop0->reg[(_reg)][(sel)]);			\
}									\
static inline void kvm_save_gc0_##name(struct mips_coproc *cop0)	\
{									\
	cop0->reg[(_reg)][(sel)] = read_gc0_##name();			\
}

/*
 * __BUILD_KVM_$ops_WRAP(): kvm_$op_$name1() -> kvm_$op_$name2()
 * These wrap a set of operations to provide them with a different name.
 */

/* Generate simple accessor wrapper */
#define __BUILD_KVM_RW_WRAP(name1, name2, type)				\
static inline __KVMT##type kvm_read_##name1(struct mips_coproc *cop0)	\
{									\
	return kvm_read_##name2(cop0);					\
}									\
static inline void kvm_write_##name1(struct mips_coproc *cop0,		\
				     __KVMT##type val)			\
{									\
	kvm_write_##name2(cop0, val);					\
}

/* Generate bitwise modifier wrapper */
#define __BUILD_KVM_SET_WRAP(name1, name2, type)			\
static inline void kvm_set_##name1(struct mips_coproc *cop0,		\
				   __KVMT##type val)			\
{									\
	kvm_set_##name2(cop0, val);					\
}									\
static inline void kvm_clear_##name1(struct mips_coproc *cop0,		\
				     __KVMT##type val)			\
{									\
	kvm_clear_##name2(cop0, val);					\
}									\
static inline void kvm_change_##name1(struct mips_coproc *cop0,		\
				      __KVMT##type mask,		\
				      __KVMT##type val)			\
{									\
	kvm_change_##name2(cop0, mask, val);				\
}

/*
 * __BUILD_KVM_$ops_SW(): kvm_$op_c0_guest_$reg() -> kvm_$op_sw_gc0_$reg()
 * These generate accessors operating on the saved context in RAM, and wrap
 * them with the common guest C0 accessors (for use by common emulation code).
 */

#define __BUILD_KVM_RW_SW(name, type, _reg, sel)			\
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_RW_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_SET_SW(name, type, _reg, sel)			\
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_ATOMIC_SW(name, type, _reg, sel)			\
	__BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)
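/*
 * Example (illustrative): __BUILD_KVM_RW_SW(count, 32, MIPS_CP0_COUNT, 0)
 * generates kvm_read/write_sw_gc0_count() for the saved context, plus
 * kvm_read/write_c0_guest_count() wrappers, so common emulation code can do:
 *
 *	u32 count = kvm_read_c0_guest_count(vcpu->arch.cop0);
 *
 * without caring whether the backing store is RAM or a VZ hardware register.
 */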
#ifndef CONFIG_KVM_MIPS_VZ

/*
 * T&E (trap & emulate software based virtualisation)
 * We generate the common accessors operating exclusively on the saved context
 * in RAM.
 */

#define __BUILD_KVM_RW_HW	__BUILD_KVM_RW_SW
#define __BUILD_KVM_SET_HW	__BUILD_KVM_SET_SW
#define __BUILD_KVM_ATOMIC_HW	__BUILD_KVM_ATOMIC_SW

#else

/*
 * VZ (hardware assisted virtualisation)
 * These macros use the active guest state in VZ mode (hardware registers).
 */

/*
 * __BUILD_KVM_$ops_HW(): kvm_$op_c0_guest_$reg() -> kvm_$op_vz_gc0_$reg()
 * These generate accessors operating on the VZ guest context in hardware, and
 * wrap them with the common guest C0 accessors (for use by common emulation
 * code).
 *
 * Accessors operating on the saved context in RAM are also generated to allow
 * convenient explicit saving and restoring of the state.
 */

#define __BUILD_KVM_RW_HW(name, type, _reg, sel)			\
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_RW_VZ(name, type, _reg, sel)			\
	__BUILD_KVM_RW_WRAP(c0_guest_##name, vz_gc0_##name, type)	\
	__BUILD_KVM_SAVE_VZ(name, _reg, sel)

#define __BUILD_KVM_SET_HW(name, type, _reg, sel)			\
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_VZ(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, vz_gc0_##name, type)

/*
 * We can't do atomic modifications of COP0 state if hardware can modify it.
 * Races must be handled explicitly.
 */
#define __BUILD_KVM_ATOMIC_HW	__BUILD_KVM_SET_HW

#endif
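/*
 * Example (illustrative): under VZ the SAVE_VZ variants give explicit
 * hardware <-> RAM context transfers, e.g. around scheduling a vcpu out
 * and back in:
 *
 *	kvm_save_gc0_entryhi(cop0);	// hardware guest EntryHi -> RAM copy
 *	...
 *	kvm_restore_gc0_entryhi(cop0);	// RAM copy -> hardware guest EntryHi
 */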
/*
 * Define accessors for CP0 registers that are accessible to the guest. These
 * are primarily used by common emulation code, which may need to access the
 * registers differently depending on the implementation.
 *
 *    fns_hw/sw          name     type    reg num         select
 */
__BUILD_KVM_RW_HW(index,          32, MIPS_CP0_TLB_INDEX,    0)
__BUILD_KVM_RW_HW(entrylo0,       l,  MIPS_CP0_TLB_LO0,      0)
__BUILD_KVM_RW_HW(entrylo1,       l,  MIPS_CP0_TLB_LO1,      0)
__BUILD_KVM_RW_HW(context,        l,  MIPS_CP0_TLB_CONTEXT,  0)
__BUILD_KVM_RW_HW(contextconfig,  32, MIPS_CP0_TLB_CONTEXT,  1)
__BUILD_KVM_RW_HW(userlocal,      l,  MIPS_CP0_TLB_CONTEXT,  2)
__BUILD_KVM_RW_HW(xcontextconfig, l,  MIPS_CP0_TLB_CONTEXT,  3)
__BUILD_KVM_RW_HW(pagemask,       l,  MIPS_CP0_TLB_PG_MASK,  0)
__BUILD_KVM_RW_HW(pagegrain,      32, MIPS_CP0_TLB_PG_MASK,  1)
__BUILD_KVM_RW_HW(segctl0,        l,  MIPS_CP0_TLB_PG_MASK,  2)
__BUILD_KVM_RW_HW(segctl1,        l,  MIPS_CP0_TLB_PG_MASK,  3)
__BUILD_KVM_RW_HW(segctl2,        l,  MIPS_CP0_TLB_PG_MASK,  4)
__BUILD_KVM_RW_HW(pwbase,         l,  MIPS_CP0_TLB_PG_MASK,  5)
__BUILD_KVM_RW_HW(pwfield,        l,  MIPS_CP0_TLB_PG_MASK,  6)
__BUILD_KVM_RW_HW(pwsize,         l,  MIPS_CP0_TLB_PG_MASK,  7)
__BUILD_KVM_RW_HW(wired,          32, MIPS_CP0_TLB_WIRED,    0)
__BUILD_KVM_RW_HW(pwctl,          32, MIPS_CP0_TLB_WIRED,    6)
__BUILD_KVM_RW_HW(hwrena,         32, MIPS_CP0_HWRENA,       0)
__BUILD_KVM_RW_HW(badvaddr,       l,  MIPS_CP0_BAD_VADDR,    0)
__BUILD_KVM_RW_HW(badinstr,       32, MIPS_CP0_BAD_VADDR,    1)
__BUILD_KVM_RW_HW(badinstrp,      32, MIPS_CP0_BAD_VADDR,    2)
__BUILD_KVM_RW_SW(count,          32, MIPS_CP0_COUNT,        0)
__BUILD_KVM_RW_HW(entryhi,        l,  MIPS_CP0_TLB_HI,       0)
__BUILD_KVM_RW_HW(compare,        32, MIPS_CP0_COMPARE,      0)
__BUILD_KVM_RW_HW(status,         32, MIPS_CP0_STATUS,       0)
__BUILD_KVM_RW_HW(intctl,         32, MIPS_CP0_STATUS,       1)
__BUILD_KVM_RW_HW(cause,          32, MIPS_CP0_CAUSE,        0)
__BUILD_KVM_RW_HW(epc,            l,  MIPS_CP0_EXC_PC,       0)
__BUILD_KVM_RW_SW(prid,           32, MIPS_CP0_PRID,         0)
__BUILD_KVM_RW_HW(ebase,          l,  MIPS_CP0_PRID,         1)
__BUILD_KVM_RW_HW(config,         32, MIPS_CP0_CONFIG,       0)
__BUILD_KVM_RW_HW(config1,        32, MIPS_CP0_CONFIG,       1)
__BUILD_KVM_RW_HW(config2,        32, MIPS_CP0_CONFIG,       2)
__BUILD_KVM_RW_HW(config3,        32, MIPS_CP0_CONFIG,       3)
__BUILD_KVM_RW_HW(config4,        32, MIPS_CP0_CONFIG,       4)
__BUILD_KVM_RW_HW(config5,        32, MIPS_CP0_CONFIG,       5)
__BUILD_KVM_RW_HW(config6,        32, MIPS_CP0_CONFIG,       6)
__BUILD_KVM_RW_HW(config7,        32, MIPS_CP0_CONFIG,       7)
__BUILD_KVM_RW_SW(maari,          l,  MIPS_CP0_LLADDR,       2)
__BUILD_KVM_RW_HW(xcontext,       l,  MIPS_CP0_TLB_XCONTEXT, 0)
__BUILD_KVM_RW_HW(errorepc,       l,  MIPS_CP0_ERROR_PC,     0)
__BUILD_KVM_RW_HW(kscratch1,      l,  MIPS_CP0_DESAVE,       2)
__BUILD_KVM_RW_HW(kscratch2,      l,  MIPS_CP0_DESAVE,       3)
__BUILD_KVM_RW_HW(kscratch3,      l,  MIPS_CP0_DESAVE,       4)
__BUILD_KVM_RW_HW(kscratch4,      l,  MIPS_CP0_DESAVE,       5)
__BUILD_KVM_RW_HW(kscratch5,      l,  MIPS_CP0_DESAVE,       6)
__BUILD_KVM_RW_HW(kscratch6,      l,  MIPS_CP0_DESAVE,       7)

/* Bitwise operations (on HW state) */
__BUILD_KVM_SET_HW(status,        32, MIPS_CP0_STATUS,       0)
/* Cause can be modified asynchronously from hardirq hrtimer callback */
__BUILD_KVM_ATOMIC_HW(cause,      32, MIPS_CP0_CAUSE,        0)
__BUILD_KVM_SET_HW(ebase,         l,  MIPS_CP0_PRID,         1)

/* Bitwise operations (on saved state) */
__BUILD_KVM_SET_SAVED(config,     32, MIPS_CP0_CONFIG,       0)
__BUILD_KVM_SET_SAVED(config1,    32, MIPS_CP0_CONFIG,       1)
__BUILD_KVM_SET_SAVED(config2,    32, MIPS_CP0_CONFIG,       2)
__BUILD_KVM_SET_SAVED(config3,    32, MIPS_CP0_CONFIG,       3)
__BUILD_KVM_SET_SAVED(config4,    32, MIPS_CP0_CONFIG,       4)
__BUILD_KVM_SET_SAVED(config5,    32, MIPS_CP0_CONFIG,       5)

/* Helpers */

static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(raw_cpu_has_fpu) || raw_cpu_has_fpu) &&
		vcpu->fpu_enabled;
}

static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_fpu(vcpu) &&
		kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
}

static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
		vcpu->msa_enabled;
}

static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_msa(vcpu) &&
		kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
}
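/*
 * Example (illustrative): these helpers gate guest FPU/MSA handling on both
 * host capability and the guest's advertised Config bits, e.g.
 *
 *	if (kvm_mips_guest_has_fpu(&vcpu->arch))
 *		kvm_own_fpu(vcpu);	// hand the real FPU to the guest
 */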
struct kvm_mips_callbacks {
	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
	int (*handle_syscall)(struct kvm_vcpu *vcpu);
	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
	int (*handle_break)(struct kvm_vcpu *vcpu);
	int (*handle_trap)(struct kvm_vcpu *vcpu);
	int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
	int (*handle_guest_exit)(struct kvm_vcpu *vcpu);
	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	int (*check_extension)(struct kvm *kvm, long ext);
	int (*vcpu_init)(struct kvm_vcpu *vcpu);
	void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
	void (*flush_shadow_all)(struct kvm *kvm);
	/*
	 * Must take care of flushing any cached GPA PTEs (e.g. guest entries
	 * in VZ root TLB, or T&E GVA page tables and corresponding root TLB
	 * mappings).
	 */
	void (*flush_shadow_memslot)(struct kvm *kvm,
				     const struct kvm_memory_slot *slot);
	gpa_t (*gva_to_gpa)(gva_t gva);
	void (*queue_timer_int)(struct kvm_vcpu *vcpu);
	void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
	void (*queue_io_int)(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq);
	void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
			       struct kvm_mips_interrupt *irq);
	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
			   u32 cause);
	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
			 u32 cause);
	unsigned long (*num_regs)(struct kvm_vcpu *vcpu);
	int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices);
	int (*get_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 *v);
	int (*set_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 v);
	int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	void (*vcpu_reenter)(struct kvm_run *run, struct kvm_vcpu *vcpu);
};
extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);

/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);

/* Building of entry/exception code */
int kvm_mips_entry_setup(void);
void *kvm_mips_build_vcpu_run(void *addr);
void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler);
void *kvm_mips_build_exception(void *addr, void *handler);
void *kvm_mips_build_exit(void *addr);

/* FPU/MSA context management */
void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
void kvm_own_fpu(struct kvm_vcpu *vcpu);
void kvm_own_msa(struct kvm_vcpu *vcpu);
void kvm_drop_fpu(struct kvm_vcpu *vcpu);
void kvm_lose_fpu(struct kvm_vcpu *vcpu);

/* TLB handling */
u32 kvm_get_kernel_asid(struct kvm_vcpu *vcpu);

u32 kvm_get_user_asid(struct kvm_vcpu *vcpu);

u32 kvm_get_commpage_asid(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM_MIPS_VZ
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
				      struct kvm_vcpu *vcpu, bool write_fault);
#endif
extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
					   struct kvm_vcpu *vcpu,
					   bool write_fault);

extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
					      struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
						struct kvm_mips_tlb *tlb,
						unsigned long gva,
						bool write_fault);

extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
						     u32 *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu,
						     bool write_fault);

extern void kvm_mips_dump_host_tlbs(void);
extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi,
				 bool user, bool kernel);
extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
				     unsigned long entryhi);

#ifdef CONFIG_KVM_MIPS_VZ
int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa);
void kvm_vz_local_flush_roottlb_all_guests(void);
void kvm_vz_local_flush_guesttlb_all(void);
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count);
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count);
#ifdef CONFIG_CPU_LOONGSON64
void kvm_loongson_clear_guest_vtlb(void);
void kvm_loongson_clear_guest_ftlb(void);
#endif
#endif

void kvm_mips_suspend_mm(int cpu);
void kvm_mips_resume_mm(int cpu);

/* MMU handling */

/**
 * enum kvm_mips_flush - Types of MMU flushes.
 * @KMF_USER:	Flush guest user virtual memory mappings.
 *		Guest USeg only.
 * @KMF_KERN:	Flush guest kernel virtual memory mappings.
 *		Guest USeg and KSeg2/3.
 * @KMF_GPA:	Flush guest physical memory mappings.
 *		Also includes KSeg0 if KMF_KERN is set.
 */
enum kvm_mips_flush {
	KMF_USER	= 0x0,
	KMF_KERN	= 0x1,
	KMF_GPA		= 0x2,
};
void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags);
bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
pgd_t *kvm_pgd_alloc(void);
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
				  bool user);
void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu);
void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu);

enum kvm_mips_fault_result {
	KVM_MIPS_MAPPED = 0,
	KVM_MIPS_GVA,
	KVM_MIPS_GPA,
	KVM_MIPS_TLB,
	KVM_MIPS_TLBINV,
	KVM_MIPS_TLBMOD,
};
enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
						   unsigned long gva,
						   bool write);

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

/* Emulation */
int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
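/*
 * Example (illustrative): the KMF_* values above combine as a bitmask, so
 * flushing a guest kernel page table of both kernel mappings and the
 * GPA-backed KSeg0 region could look like (assuming "kern_mm" points at the
 * guest kernel mm):
 *
 *	kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
 */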
/**
 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	Whether the TLBL exception was likely due to an instruction
 *		fetch fault rather than a data load fault.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *vcpu)
{
	unsigned long badvaddr = vcpu->host_cp0_badvaddr;
	unsigned long epc = msk_isa16_mode(vcpu->pc);
	u32 cause = vcpu->host_cp0_cause;

	if (epc == badvaddr)
		return true;

	/*
	 * Branches may be 32-bit or 16-bit instructions.
	 * This isn't exact, but we don't really support MIPS16 or microMIPS
	 * yet in KVM anyway.
	 */
	if ((cause & CAUSEF_BD) && badvaddr - epc <= 4)
		return true;

	return false;
}

extern enum emulation_result kvm_mips_emulate_inst(u32 cause,
						   u32 *opc,
						   struct kvm_run *run,
						   struct kvm_vcpu *vcpu);

long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_syscall(u32 cause,
						      u32 *opc,
						      struct kvm_run *run,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
							 u32 *opc,
							 struct kvm_run *run,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
							u32 *opc,
							struct kvm_run *run,
							struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
							 u32 *opc,
							 struct kvm_run *run,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
							u32 *opc,
							struct kvm_run *run,
							struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
						     u32 *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
						      u32 *opc,
						      struct kvm_run *run,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_handle_ri(u32 cause,
						u32 *opc,
						struct kvm_run *run,
						struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
						     u32 *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
						     u32 *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
						       u32 *opc,
						       struct kvm_run *run,
						       struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
							 u32 *opc,
							 struct kvm_run *run,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
						      u32 *opc,
						      struct kvm_run *run,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
							 u32 *opc,
							 struct kvm_run *run,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
							 struct kvm_run *run);

u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz);
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);

/* fairly internal functions requiring some care to use */
int kvm_mips_count_disabled(struct kvm_vcpu *vcpu);
ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count);
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
			     u32 count, int min_drift);
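/*
 * Example (illustrative): the freeze/restore pair brackets changes to timer
 * state so that the virtual Count value observed by the guest stays
 * consistent:
 *
 *	ktime_t now;
 *	u32 count;
 *
 *	now = kvm_mips_freeze_hrtimer(vcpu, &count);
 *	... recompute timer state based on count ...
 *	kvm_mips_restore_hrtimer(vcpu, now, count, 0);
 */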
#ifdef CONFIG_KVM_MIPS_VZ
void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu);
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu);
#else
static inline void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu) {}
static inline void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu) {}
#endif

enum emulation_result kvm_mips_check_privilege(u32 cause,
					       u32 *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu);

enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
					     u32 *opc,
					     u32 cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
					   u32 *opc,
					   u32 cause,
					   struct kvm_run *run,
					   struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
					     u32 cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    u32 cause,
					    struct kvm_run *run,
					    struct kvm_vcpu *vcpu);

/* COP0 */
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu);

unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);

/* Hypercalls (hypcall.c) */

enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
					    union mips_instruction inst);
int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu);

/* Dynamic binary translation */
extern int kvm_mips_trans_cache_index(union mips_instruction inst,
				      u32 *opc, struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
				   struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
			       struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
			       struct kvm_vcpu *vcpu);

/* Misc */
extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
extern int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_mips_interrupt *irq);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
					 struct kvm_memory_slot *slot) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

#endif /* __MIPS_KVM_HOST_H__ */