/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#ifndef __MIPS_KVM_HOST_H__
#define __MIPS_KVM_HOST_H__

#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/threads.h>
#include <linux/spinlock.h>

#include <asm/asm.h>
#include <asm/inst.h>
#include <asm/mipsregs.h>

#include <kvm/iodev.h>

/* MIPS KVM register ids */
#define MIPS_CP0_32(_R, _S)					\
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S)					\
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))

#define KVM_REG_MIPS_CP0_INDEX		MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1	MIPS_CP0_64(3, 0)
#define KVM_REG_MIPS_CP0_CONTEXT	MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_CONTEXTCONFIG	MIPS_CP0_32(4, 1)
#define KVM_REG_MIPS_CP0_USERLOCAL	MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_XCONTEXTCONFIG	MIPS_CP0_64(4, 3)
#define KVM_REG_MIPS_CP0_PAGEMASK	MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN	MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_SEGCTL0	MIPS_CP0_64(5, 2)
#define KVM_REG_MIPS_CP0_SEGCTL1	MIPS_CP0_64(5, 3)
#define KVM_REG_MIPS_CP0_SEGCTL2	MIPS_CP0_64(5, 4)
#define KVM_REG_MIPS_CP0_PWBASE		MIPS_CP0_64(5, 5)
#define KVM_REG_MIPS_CP0_PWFIELD	MIPS_CP0_64(5, 6)
#define KVM_REG_MIPS_CP0_PWSIZE		MIPS_CP0_64(5, 7)
#define KVM_REG_MIPS_CP0_WIRED		MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_PWCTL		MIPS_CP0_32(6, 6)
#define KVM_REG_MIPS_CP0_HWRENA		MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR	MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_BADINSTR	MIPS_CP0_32(8, 1)
#define KVM_REG_MIPS_CP0_BADINSTRP	MIPS_CP0_32(8, 2)
#define KVM_REG_MIPS_CP0_COUNT		MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI	MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE	MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS		MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_INTCTL		MIPS_CP0_32(12, 1)
#define KVM_REG_MIPS_CP0_CAUSE		MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC		MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID		MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE		MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG		MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1	MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2	MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3	MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4	MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5	MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG6	MIPS_CP0_32(16, 6)
#define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_MAARI		MIPS_CP0_64(17, 2)
#define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_DIAG		MIPS_CP0_32(22, 0)
#define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)
#define KVM_REG_MIPS_CP0_KSCRATCH1	MIPS_CP0_64(31, 2)
#define KVM_REG_MIPS_CP0_KSCRATCH2	MIPS_CP0_64(31, 3)
#define KVM_REG_MIPS_CP0_KSCRATCH3	MIPS_CP0_64(31, 4)
#define KVM_REG_MIPS_CP0_KSCRATCH4	MIPS_CP0_64(31, 5)
#define KVM_REG_MIPS_CP0_KSCRATCH5	MIPS_CP0_64(31, 6)
#define KVM_REG_MIPS_CP0_KSCRATCH6	MIPS_CP0_64(31, 7)
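/*
 * Example (illustrative): the low bits encode "8 * reg + sel", so
 * KVM_REG_MIPS_CP0_STATUS is KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | 96
 * (CP0 register 12, select 0). Userspace would typically pass such an id
 * in struct kvm_one_reg to the KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls.
 */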
#define KVM_MAX_VCPUS		16
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS	0

#define KVM_HALT_POLL_NS_DEFAULT 500000

extern unsigned long GUESTID_MASK;
extern unsigned long GUESTID_FIRST_VERSION;
extern unsigned long GUESTID_VERSION_MASK;

#define KVM_INVALID_ADDR		0xdeadbeef

/*
 * EVA has overlapping user & kernel address spaces, so user VAs may be >
 * PAGE_OFFSET. For this reason we can't use the default KVM_HVA_ERR_BAD of
 * PAGE_OFFSET.
 */
#define KVM_HVA_ERR_BAD			(-1UL)
#define KVM_HVA_ERR_RO_BAD		(-2UL)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return IS_ERR_VALUE(addr);
}

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 wait_exits;
	u64 cache_exits;
	u64 signal_exits;
	u64 int_exits;
	u64 cop_unusable_exits;
	u64 tlbmod_exits;
	u64 tlbmiss_ld_exits;
	u64 tlbmiss_st_exits;
	u64 addrerr_st_exits;
	u64 addrerr_ld_exits;
	u64 syscall_exits;
	u64 resvd_inst_exits;
	u64 break_inst_exits;
	u64 trap_inst_exits;
	u64 msa_fpe_exits;
	u64 fpe_exits;
	u64 msa_disabled_exits;
	u64 flush_dcache_exits;
	u64 vz_gpsi_exits;
	u64 vz_gsfc_exits;
	u64 vz_hc_exits;
	u64 vz_grr_exits;
	u64 vz_gva_exits;
	u64 vz_ghfc_exits;
	u64 vz_gpa_exits;
	u64 vz_resvd_exits;
#ifdef CONFIG_CPU_LOONGSON64
	u64 vz_cpucfg_exits;
#endif
};

struct kvm_arch_memory_slot {
};

#ifdef CONFIG_CPU_LOONGSON64
struct ipi_state {
	uint32_t status;
	uint32_t en;
	uint32_t set;
	uint32_t clear;
	uint64_t buf[4];
};

struct loongson_kvm_ipi;

struct ipi_io_device {
	int node_id;
	struct loongson_kvm_ipi *ipi;
	struct kvm_io_device device;
};

struct loongson_kvm_ipi {
	spinlock_t lock;
	struct kvm *kvm;
	struct ipi_state ipistate[16];
	struct ipi_io_device dev_ipi[4];
};
#endif

struct kvm_arch {
	/* Guest physical mm */
	struct mm_struct gpa_mm;
	/* Mask of CPUs needing GPA ASID flush */
	cpumask_t asid_flush_mask;
#ifdef CONFIG_CPU_LOONGSON64
	struct loongson_kvm_ipi ipi;
#endif
};

#define N_MIPS_COPROC_REGS	32
#define N_MIPS_COPROC_SEL	8

struct mips_coproc {
	unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
	unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#endif
};
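/*
 * Example (illustrative): the saved guest Status register (CP0 register 12,
 * select 0) lives in cop0->reg[MIPS_CP0_STATUS][0]; the accessor-builder
 * macros further below generate typed wrappers around this indexing.
 */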
/*
 * Coprocessor 0 register names
 */
#define MIPS_CP0_TLB_INDEX	0
#define MIPS_CP0_TLB_RANDOM	1
#define MIPS_CP0_TLB_LOW	2
#define MIPS_CP0_TLB_LO0	2
#define MIPS_CP0_TLB_LO1	3
#define MIPS_CP0_TLB_CONTEXT	4
#define MIPS_CP0_TLB_PG_MASK	5
#define MIPS_CP0_TLB_WIRED	6
#define MIPS_CP0_HWRENA		7
#define MIPS_CP0_BAD_VADDR	8
#define MIPS_CP0_COUNT		9
#define MIPS_CP0_TLB_HI		10
#define MIPS_CP0_COMPARE	11
#define MIPS_CP0_STATUS		12
#define MIPS_CP0_CAUSE		13
#define MIPS_CP0_EXC_PC		14
#define MIPS_CP0_PRID		15
#define MIPS_CP0_CONFIG		16
#define MIPS_CP0_LLADDR		17
#define MIPS_CP0_WATCH_LO	18
#define MIPS_CP0_WATCH_HI	19
#define MIPS_CP0_TLB_XCONTEXT	20
#define MIPS_CP0_DIAG		22
#define MIPS_CP0_ECC		26
#define MIPS_CP0_CACHE_ERR	27
#define MIPS_CP0_TAG_LO		28
#define MIPS_CP0_TAG_HI		29
#define MIPS_CP0_ERROR_PC	30
#define MIPS_CP0_DEBUG		23
#define MIPS_CP0_DEPC		24
#define MIPS_CP0_PERFCNT	25
#define MIPS_CP0_ERRCTL		26
#define MIPS_CP0_DATA_LO	28
#define MIPS_CP0_DATA_HI	29
#define MIPS_CP0_DESAVE		31

#define MIPS_CP0_CONFIG_SEL	0
#define MIPS_CP0_CONFIG1_SEL	1
#define MIPS_CP0_CONFIG2_SEL	2
#define MIPS_CP0_CONFIG3_SEL	3
#define MIPS_CP0_CONFIG4_SEL	4
#define MIPS_CP0_CONFIG5_SEL	5

#define MIPS_CP0_GUESTCTL2	10
#define MIPS_CP0_GUESTCTL2_SEL	5
#define MIPS_CP0_GTOFFSET	12
#define MIPS_CP0_GTOFFSET_SEL	7

/* Resume Flags */
#define RESUME_FLAG_DR		(1<<0)	/* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */

#define RESUME_GUEST		0
#define RESUME_GUEST_DR		RESUME_FLAG_DR
#define RESUME_HOST		RESUME_FLAG_HOST

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_WAIT,		/* WAIT instruction */
	EMULATE_PRIV_FAIL,
	EMULATE_EXCEPT,		/* A guest exception has been generated */
	EMULATE_HYPERCALL,	/* HYPCALL instruction */
};

#if defined(CONFIG_64BIT)
#define VPN2_MASK		GENMASK(cpu_vmbits - 1, 13)
#else
#define VPN2_MASK		0xffffe000
#endif
#define KVM_ENTRYHI_ASID	cpu_asid_mask(&boot_cpu_data)
#define TLB_IS_GLOBAL(x)	((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
#define TLB_VPN2(x)		((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x)		((x).tlb_hi & KVM_ENTRYHI_ASID)
#define TLB_LO_IDX(x, va)	(((va) >> PAGE_SHIFT) & 1)
#define TLB_IS_VALID(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V)
#define TLB_IS_DIRTY(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_D)
#define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) ==	\
				 ((y) & VPN2_MASK & ~(x).tlb_mask))
#define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) ||			\
				 TLB_ASID(x) == ((y) & KVM_ENTRYHI_ASID))

struct kvm_mips_tlb {
	long tlb_mask;
	long tlb_hi;
	long tlb_lo[2];
};
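/*
 * Example (illustrative): for a 4K page pair (tlb_mask == 0), an entry
 * matches a guest EntryHi value hi when
 *	TLB_HI_VPN2_HIT(tlb, hi) && TLB_HI_ASID_HIT(tlb, hi)
 * i.e. the VPN2 bits agree and either both EntryLo halves are global or
 * the ASID fields are equal; TLB_LO_IDX() then picks the even/odd page.
 */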
#define KVM_MIPS_AUX_FPU	0x1
#define KVM_MIPS_AUX_MSA	0x2

struct kvm_vcpu_arch {
	void *guest_ebase;
	int (*vcpu_run)(struct kvm_vcpu *vcpu);

	/* Host registers preserved across guest mode execution */
	unsigned long host_stack;
	unsigned long host_gp;
	unsigned long host_pgd;
	unsigned long host_entryhi;

	/* Host CP0 registers used when handling exits from guest */
	unsigned long host_cp0_badvaddr;
	unsigned long host_cp0_epc;
	u32 host_cp0_cause;
	u32 host_cp0_guestctl0;
	u32 host_cp0_badinstr;
	u32 host_cp0_badinstrp;

	/* GPRS */
	unsigned long gprs[32];
	unsigned long hi;
	unsigned long lo;
	unsigned long pc;

	/* FPU State */
	struct mips_fpu_struct fpu;
	/* Which auxiliary state is loaded (KVM_MIPS_AUX_*) */
	unsigned int aux_inuse;

	/* COP0 State */
	struct mips_coproc *cop0;

	/* Resume PC after MMIO completion */
	unsigned long io_pc;
	/* GPR used as IO source/target */
	u32 io_gpr;

	struct hrtimer comparecount_timer;
	/* Count timer control KVM register */
	u32 count_ctl;
	/* Count bias from the raw time */
	u32 count_bias;
	/* Frequency of timer in Hz */
	u32 count_hz;
	/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
	s64 count_dyn_bias;
	/* Resume time */
	ktime_t count_resume;
	/* Period of timer tick in ns */
	u64 count_period;

	/* Bitmask of exceptions that are pending */
	unsigned long pending_exceptions;

	/* Bitmask of pending exceptions to be cleared */
	unsigned long pending_exceptions_clr;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* vcpu's vzguestid is different on each host cpu in an smp system */
	u32 vzguestid[NR_CPUS];

	/* wired guest TLB entries */
	struct kvm_mips_tlb *wired_tlb;
	unsigned int wired_tlb_limit;
	unsigned int wired_tlb_used;

	/* emulated guest MAAR registers */
	unsigned long maar[6];

	/* Last CPU the VCPU state was loaded on */
	int last_sched_cpu;
	/* Last CPU the VCPU actually executed guest code on */
	int last_exec_cpu;

	/* WAIT executed */
	int wait;

	u8 fpu_enabled;
	u8 msa_enabled;
};

static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
						unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __stringify(LONG_LL) "	%0, %1	\n"
		"	or	%0, %2				\n"
		"	" __stringify(LONG_SC) "	%0, %1	\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (val));
	} while (unlikely(!temp));
}

static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
						  unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __stringify(LONG_LL) "	%0, %1	\n"
		"	and	%0, %2				\n"
		"	" __stringify(LONG_SC) "	%0, %1	\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~val));
	} while (unlikely(!temp));
}

static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
						   unsigned long change,
						   unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __stringify(LONG_LL) "	%0, %1	\n"
		"	and	%0, %2				\n"
		"	or	%0, %3				\n"
		"	" __stringify(LONG_SC) "	%0, %1	\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~change), "r" (val & change));
	} while (unlikely(!temp));
}
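/*
 * Illustrative note: the three helpers above behave like "*reg |= val",
 * "*reg &= ~val" and "*reg = (*reg & ~change) | (val & change)", but use
 * an LL/SC loop that retries until the store-conditional succeeds, making
 * them safe against concurrent modification of the same word.
 */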
/* Guest register types, used in accessor build below */
#define __KVMT32	u32
#define __KVMTl		unsigned long

/*
 * __BUILD_KVM_$ops_SAVED(): kvm_$op_sw_gc0_$reg()
 * These operate on the saved guest C0 state in RAM.
 */

/* Generate saved context simple accessors */
#define __BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
static inline __KVMT##type kvm_read_sw_gc0_##name(struct mips_coproc *cop0) \
{									\
	return cop0->reg[(_reg)][(sel)];				\
}									\
static inline void kvm_write_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] = val;					\
}

/* Generate saved context bitwise modifiers */
#define __BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] |= val;				\
}									\
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] &= ~val;				\
}									\
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	unsigned long _mask = mask;					\
	cop0->reg[(_reg)][(sel)] &= ~_mask;				\
	cop0->reg[(_reg)][(sel)] |= val & _mask;			\
}

/* Generate saved context atomic bitwise modifiers */
#define __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel)			\
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	_kvm_atomic_set_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val);	\
}									\
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	_kvm_atomic_clear_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val);	\
}									\
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	_kvm_atomic_change_c0_guest_reg(&cop0->reg[(_reg)][(sel)], mask, \
					val);				\
}

/*
 * __BUILD_KVM_$ops_VZ(): kvm_$op_vz_gc0_$reg()
 * These operate on the VZ guest C0 context in hardware.
 */

/* Generate VZ guest context simple accessors */
#define __BUILD_KVM_RW_VZ(name, type, _reg, sel)			\
static inline __KVMT##type kvm_read_vz_gc0_##name(struct mips_coproc *cop0) \
{									\
	return read_gc0_##name();					\
}									\
static inline void kvm_write_vz_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	write_gc0_##name(val);						\
}

/* Generate VZ guest context bitwise modifiers */
#define __BUILD_KVM_SET_VZ(name, type, _reg, sel)			\
static inline void kvm_set_vz_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	set_gc0_##name(val);						\
}									\
static inline void kvm_clear_vz_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	clear_gc0_##name(val);						\
}									\
static inline void kvm_change_vz_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	change_gc0_##name(mask, val);					\
}

/* Generate VZ guest context save/restore to/from saved context */
#define __BUILD_KVM_SAVE_VZ(name, _reg, sel)				\
static inline void kvm_restore_gc0_##name(struct mips_coproc *cop0)	\
{									\
	write_gc0_##name(cop0->reg[(_reg)][(sel)]);			\
}									\
static inline void kvm_save_gc0_##name(struct mips_coproc *cop0)	\
{									\
	cop0->reg[(_reg)][(sel)] = read_gc0_##name();			\
}
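/*
 * Example (illustrative): __BUILD_KVM_RW_SAVED(index, 32, MIPS_CP0_TLB_INDEX, 0)
 * expands to kvm_read_sw_gc0_index() and kvm_write_sw_gc0_index(), which
 * read and write cop0->reg[MIPS_CP0_TLB_INDEX][0] as a u32.
 */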
/*
 * __BUILD_KVM_$ops_WRAP(): kvm_$op_$name1() -> kvm_$op_$name2()
 * These wrap a set of operations to provide them with a different name.
 */

/* Generate simple accessor wrapper */
#define __BUILD_KVM_RW_WRAP(name1, name2, type)				\
static inline __KVMT##type kvm_read_##name1(struct mips_coproc *cop0)	\
{									\
	return kvm_read_##name2(cop0);					\
}									\
static inline void kvm_write_##name1(struct mips_coproc *cop0,		\
				     __KVMT##type val)			\
{									\
	kvm_write_##name2(cop0, val);					\
}

/* Generate bitwise modifier wrapper */
#define __BUILD_KVM_SET_WRAP(name1, name2, type)			\
static inline void kvm_set_##name1(struct mips_coproc *cop0,		\
				   __KVMT##type val)			\
{									\
	kvm_set_##name2(cop0, val);					\
}									\
static inline void kvm_clear_##name1(struct mips_coproc *cop0,		\
				     __KVMT##type val)			\
{									\
	kvm_clear_##name2(cop0, val);					\
}									\
static inline void kvm_change_##name1(struct mips_coproc *cop0,		\
				      __KVMT##type mask,		\
				      __KVMT##type val)			\
{									\
	kvm_change_##name2(cop0, mask, val);				\
}

/*
 * __BUILD_KVM_$ops_SW(): kvm_$op_c0_guest_$reg() -> kvm_$op_sw_gc0_$reg()
 * These generate accessors operating on the saved context in RAM, and wrap
 * them with the common guest C0 accessors (for use by common emulation code).
 */

#define __BUILD_KVM_RW_SW(name, type, _reg, sel)			\
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_RW_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_SET_SW(name, type, _reg, sel)			\
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_ATOMIC_SW(name, type, _reg, sel)			\
	__BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)

/*
 * VZ (hardware assisted virtualisation)
 * These macros use the active guest state in VZ mode (hardware registers).
 */

/*
 * __BUILD_KVM_$ops_HW(): kvm_$op_c0_guest_$reg() -> kvm_$op_vz_gc0_$reg()
 * These generate accessors operating on the VZ guest context in hardware, and
 * wrap them with the common guest C0 accessors (for use by common emulation
 * code).
 *
 * Accessors operating on the saved context in RAM are also generated to allow
 * convenient explicit saving and restoring of the state.
 */

#define __BUILD_KVM_RW_HW(name, type, _reg, sel)			\
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_RW_VZ(name, type, _reg, sel)			\
	__BUILD_KVM_RW_WRAP(c0_guest_##name, vz_gc0_##name, type)	\
	__BUILD_KVM_SAVE_VZ(name, _reg, sel)

#define __BUILD_KVM_SET_HW(name, type, _reg, sel)			\
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_VZ(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, vz_gc0_##name, type)

/*
 * We can't do atomic modifications of COP0 state if hardware can modify it.
 * Races must be handled explicitly.
 */
#define __BUILD_KVM_ATOMIC_HW	__BUILD_KVM_SET_HW
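/*
 * Example (illustrative): __BUILD_KVM_RW_HW(status, 32, MIPS_CP0_STATUS, 0)
 * below generates kvm_read/write_sw_gc0_status() (saved context in RAM),
 * kvm_read/write_vz_gc0_status() (hardware guest context),
 * kvm_save/restore_gc0_status() (copying between the two), and the
 * kvm_read/write_c0_guest_status() wrappers used by common emulation code.
 */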
/*
 * Define accessors for CP0 registers that are accessible to the guest. These
 * are primarily used by common emulation code, which may need to access the
 * registers differently depending on the implementation.
 *
 *    fns_hw/sw         name      type  reg num              select
 */
__BUILD_KVM_RW_HW(index,          32, MIPS_CP0_TLB_INDEX,    0)
__BUILD_KVM_RW_HW(entrylo0,       l,  MIPS_CP0_TLB_LO0,      0)
__BUILD_KVM_RW_HW(entrylo1,       l,  MIPS_CP0_TLB_LO1,      0)
__BUILD_KVM_RW_HW(context,        l,  MIPS_CP0_TLB_CONTEXT,  0)
__BUILD_KVM_RW_HW(contextconfig,  32, MIPS_CP0_TLB_CONTEXT,  1)
__BUILD_KVM_RW_HW(userlocal,      l,  MIPS_CP0_TLB_CONTEXT,  2)
__BUILD_KVM_RW_HW(xcontextconfig, l,  MIPS_CP0_TLB_CONTEXT,  3)
__BUILD_KVM_RW_HW(pagemask,       l,  MIPS_CP0_TLB_PG_MASK,  0)
__BUILD_KVM_RW_HW(pagegrain,      32, MIPS_CP0_TLB_PG_MASK,  1)
__BUILD_KVM_RW_HW(segctl0,        l,  MIPS_CP0_TLB_PG_MASK,  2)
__BUILD_KVM_RW_HW(segctl1,        l,  MIPS_CP0_TLB_PG_MASK,  3)
__BUILD_KVM_RW_HW(segctl2,        l,  MIPS_CP0_TLB_PG_MASK,  4)
__BUILD_KVM_RW_HW(pwbase,         l,  MIPS_CP0_TLB_PG_MASK,  5)
__BUILD_KVM_RW_HW(pwfield,        l,  MIPS_CP0_TLB_PG_MASK,  6)
__BUILD_KVM_RW_HW(pwsize,         l,  MIPS_CP0_TLB_PG_MASK,  7)
__BUILD_KVM_RW_HW(wired,          32, MIPS_CP0_TLB_WIRED,    0)
__BUILD_KVM_RW_HW(pwctl,          32, MIPS_CP0_TLB_WIRED,    6)
__BUILD_KVM_RW_HW(hwrena,         32, MIPS_CP0_HWRENA,       0)
__BUILD_KVM_RW_HW(badvaddr,       l,  MIPS_CP0_BAD_VADDR,    0)
__BUILD_KVM_RW_HW(badinstr,       32, MIPS_CP0_BAD_VADDR,    1)
__BUILD_KVM_RW_HW(badinstrp,      32, MIPS_CP0_BAD_VADDR,    2)
__BUILD_KVM_RW_SW(count,          32, MIPS_CP0_COUNT,        0)
__BUILD_KVM_RW_HW(entryhi,        l,  MIPS_CP0_TLB_HI,       0)
__BUILD_KVM_RW_HW(compare,        32, MIPS_CP0_COMPARE,      0)
__BUILD_KVM_RW_HW(status,         32, MIPS_CP0_STATUS,       0)
__BUILD_KVM_RW_HW(intctl,         32, MIPS_CP0_STATUS,       1)
__BUILD_KVM_RW_HW(cause,          32, MIPS_CP0_CAUSE,        0)
__BUILD_KVM_RW_HW(epc,            l,  MIPS_CP0_EXC_PC,       0)
__BUILD_KVM_RW_SW(prid,           32, MIPS_CP0_PRID,         0)
__BUILD_KVM_RW_HW(ebase,          l,  MIPS_CP0_PRID,         1)
__BUILD_KVM_RW_HW(config,         32, MIPS_CP0_CONFIG,       0)
__BUILD_KVM_RW_HW(config1,        32, MIPS_CP0_CONFIG,       1)
__BUILD_KVM_RW_HW(config2,        32, MIPS_CP0_CONFIG,       2)
__BUILD_KVM_RW_HW(config3,        32, MIPS_CP0_CONFIG,       3)
__BUILD_KVM_RW_HW(config4,        32, MIPS_CP0_CONFIG,       4)
__BUILD_KVM_RW_HW(config5,        32, MIPS_CP0_CONFIG,       5)
__BUILD_KVM_RW_HW(config6,        32, MIPS_CP0_CONFIG,       6)
__BUILD_KVM_RW_HW(config7,        32, MIPS_CP0_CONFIG,       7)
__BUILD_KVM_RW_SW(maari,          l,  MIPS_CP0_LLADDR,       2)
__BUILD_KVM_RW_HW(xcontext,       l,  MIPS_CP0_TLB_XCONTEXT, 0)
__BUILD_KVM_RW_HW(errorepc,       l,  MIPS_CP0_ERROR_PC,     0)
__BUILD_KVM_RW_HW(kscratch1,      l,  MIPS_CP0_DESAVE,       2)
__BUILD_KVM_RW_HW(kscratch2,      l,  MIPS_CP0_DESAVE,       3)
__BUILD_KVM_RW_HW(kscratch3,      l,  MIPS_CP0_DESAVE,       4)
__BUILD_KVM_RW_HW(kscratch4,      l,  MIPS_CP0_DESAVE,       5)
__BUILD_KVM_RW_HW(kscratch5,      l,  MIPS_CP0_DESAVE,       6)
__BUILD_KVM_RW_HW(kscratch6,      l,  MIPS_CP0_DESAVE,       7)

/* Bitwise operations (on HW state) */
__BUILD_KVM_SET_HW(status,        32, MIPS_CP0_STATUS,       0)
/* Cause can be modified asynchronously from hardirq hrtimer callback */
__BUILD_KVM_ATOMIC_HW(cause,      32, MIPS_CP0_CAUSE,        0)
__BUILD_KVM_SET_HW(ebase,         l,  MIPS_CP0_PRID,         1)

/* Bitwise operations (on saved state) */
__BUILD_KVM_SET_SAVED(config,     32, MIPS_CP0_CONFIG,       0)
__BUILD_KVM_SET_SAVED(config1,    32, MIPS_CP0_CONFIG,       1)
__BUILD_KVM_SET_SAVED(config2,    32, MIPS_CP0_CONFIG,       2)
__BUILD_KVM_SET_SAVED(config3,    32, MIPS_CP0_CONFIG,       3)
__BUILD_KVM_SET_SAVED(config4,    32, MIPS_CP0_CONFIG,       4)
__BUILD_KVM_SET_SAVED(config5,    32, MIPS_CP0_CONFIG,       5)

/* Helpers */

static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
{
	/*
	 * Rule the FPU out only if the host provably (at compile time)
	 * lacks one; otherwise defer to the per-vcpu fpu_enabled flag.
	 */
	return (!__builtin_constant_p(raw_cpu_has_fpu) || raw_cpu_has_fpu) &&
		vcpu->fpu_enabled;
}

static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_fpu(vcpu) &&
		kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
}

static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
		vcpu->msa_enabled;
}

static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_msa(vcpu) &&
		kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
}
struct kvm_mips_callbacks {
	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
	int (*handle_syscall)(struct kvm_vcpu *vcpu);
	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
	int (*handle_break)(struct kvm_vcpu *vcpu);
	int (*handle_trap)(struct kvm_vcpu *vcpu);
	int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
	int (*handle_guest_exit)(struct kvm_vcpu *vcpu);
	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	int (*check_extension)(struct kvm *kvm, long ext);
	int (*vcpu_init)(struct kvm_vcpu *vcpu);
	void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
	void (*prepare_flush_shadow)(struct kvm *kvm);
	gpa_t (*gva_to_gpa)(gva_t gva);
	void (*queue_timer_int)(struct kvm_vcpu *vcpu);
	void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
	void (*queue_io_int)(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq);
	void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
			       struct kvm_mips_interrupt *irq);
	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
			   u32 cause);
	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
			 u32 cause);
	unsigned long (*num_regs)(struct kvm_vcpu *vcpu);
	int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices);
	int (*get_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 *v);
	int (*set_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 v);
	int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_run)(struct kvm_vcpu *vcpu);
	void (*vcpu_reenter)(struct kvm_vcpu *vcpu);
};
extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
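/*
 * Illustrative note: an implementation backend is expected to provide a
 * struct kvm_mips_callbacks instance, which kvm_mips_emulation_init()
 * publishes via *install_callbacks for the generic code to dispatch through.
 */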
/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_exit(struct kvm_vcpu *vcpu);

/* Building of entry/exception code */
int kvm_mips_entry_setup(void);
void *kvm_mips_build_vcpu_run(void *addr);
void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler);
void *kvm_mips_build_exception(void *addr, void *handler);
void *kvm_mips_build_exit(void *addr);

/* FPU/MSA context management */
void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
void kvm_own_fpu(struct kvm_vcpu *vcpu);
void kvm_own_msa(struct kvm_vcpu *vcpu);
void kvm_drop_fpu(struct kvm_vcpu *vcpu);
void kvm_lose_fpu(struct kvm_vcpu *vcpu);

/* TLB handling */
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
				      struct kvm_vcpu *vcpu, bool write_fault);

int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa);
void kvm_vz_local_flush_roottlb_all_guests(void);
void kvm_vz_local_flush_guesttlb_all(void);
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count);
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count);
#ifdef CONFIG_CPU_LOONGSON64
void kvm_loongson_clear_guest_vtlb(void);
void kvm_loongson_clear_guest_ftlb(void);
#endif

/* MMU handling */

bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
pgd_t *kvm_pgd_alloc(void);
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

#define KVM_ARCH_WANT_MMU_NOTIFIER

/* Emulation */
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);

/**
 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	Whether the TLBL exception was likely due to an instruction
 *		fetch fault rather than a data load fault.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *vcpu)
{
	unsigned long badvaddr = vcpu->host_cp0_badvaddr;
	unsigned long epc = msk_isa16_mode(vcpu->pc);
	u32 cause = vcpu->host_cp0_cause;

	if (epc == badvaddr)
		return true;

	/*
	 * Branches may be 32-bit or 16-bit instructions.
	 * This isn't exact, but we don't really support MIPS16 or microMIPS
	 * yet in KVM anyway.
	 */
	if ((cause & CAUSEF_BD) && badvaddr - epc <= 4)
		return true;

	return false;
}
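/*
 * Example (illustrative): with Cause.BD set and a 32-bit branch at
 * pc == 0x1000, a faulting fetch of the delay slot gives
 * badvaddr == 0x1004, so badvaddr - epc <= 4 classifies it as ifetch.
 */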
extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu);

u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz);
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);

/* fairly internal functions requiring some care to use */
int kvm_mips_count_disabled(struct kvm_vcpu *vcpu);
ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count);
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
			     u32 count, int min_drift);

void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu);
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu);

enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
					     u32 cause,
					     struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    u32 cause,
					    struct kvm_vcpu *vcpu);

/* COP0 */
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu);

/* Hypercalls (hypcall.c) */

enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
					    union mips_instruction inst);
int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu);

/* Misc */
extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
extern int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_mips_interrupt *irq);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
					 struct kvm_memory_slot *slot) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
int kvm_arch_flush_remote_tlb(struct kvm *kvm);

#endif /* __MIPS_KVM_HOST_H__ */