/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_H__
#define __ASM_KVM_BOOK3S_H__

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>

struct kvmppc_bat {
	u64 raw;
	u32 bepi;
	u32 bepi_mask;
	u32 brpn;
	u8 wimg;
	u8 pp;
	bool vs		: 1;
	bool vp		: 1;
};

struct kvmppc_sid_map {
	u64 guest_vsid;
	u64 guest_esid;
	u64 host_vsid;
	bool valid	: 1;
};

#define SID_MAP_BITS	9
#define SID_MAP_NUM	(1 << SID_MAP_BITS)
#define SID_MAP_MASK	(SID_MAP_NUM - 1)

#ifdef CONFIG_PPC_BOOK3S_64
#define SID_CONTEXTS	1
#else
#define SID_CONTEXTS	128
#define VSID_POOL_SIZE	(SID_CONTEXTS * 16)
#endif

struct hpte_cache {
	struct hlist_node list_pte;
	struct hlist_node list_pte_long;
	struct hlist_node list_vpte;
	struct hlist_node list_vpte_long;
#ifdef CONFIG_PPC_BOOK3S_64
	struct hlist_node list_vpte_64k;
#endif
	struct rcu_head rcu_head;
	u64 host_vpn;
	u64 pfn;
	ulong slot;
	struct kvmppc_pte pte;
	int pagesize;
};
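
/*
 * Editorial note, hedged: the several hlist_nodes above appear to let a
 * single cached HPTE sit on several of the hpte_hash_* chains declared in
 * struct kvmppc_vcpu_book3s below at the same time, so the same entry can
 * be looked up under different keys -- by guest effective address for
 * kvmppc_mmu_pte_flush() and by virtual page for kvmppc_mmu_pte_vflush().
 */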

/*
 * Struct for a virtual core.
 * Note: entry_exit_map combines a bitmap of threads that have entered
 * in the bottom 8 bits and a bitmap of threads that have exited in the
 * next 8 bits.  This is so that we can atomically set the entry bit
 * iff the exit map is 0 without taking a lock.
 */
struct kvmppc_vcore {
	int n_runnable;
	int num_threads;
	int entry_exit_map;
	int napping_threads;
	int first_vcpuid;
	u16 pcpu;
	u16 last_cpu;
	u8 vcore_state;
	u8 in_guest;
	struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
	struct list_head preempt_list;
	spinlock_t lock;
	struct swait_queue_head wq;
	spinlock_t stoltb_lock;	/* protects stolen_tb and preempt_tb */
	u64 stolen_tb;
	u64 preempt_tb;
	struct kvm_vcpu *runner;
	struct kvm *kvm;
	u64 tb_offset;		/* guest timebase - host timebase */
	u64 tb_offset_applied;	/* timebase offset currently in force */
	ulong lpcr;
	u32 arch_compat;
	ulong pcr;
	ulong dpdes;		/* doorbell state (POWER8) */
	ulong vtb;		/* virtual timebase */
	ulong conferring_threads;
	unsigned int halt_poll_ns;
	atomic_t online_count;
};
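
/*
 * Illustrative sketch only, not part of the kernel API: it shows how the
 * entry_exit_map layout described above allows a lock-free "enter iff no
 * thread has exited yet" operation using cmpxchg().  The name
 * kvmppc_try_enter_vcore() is hypothetical; the kernel's actual fast path
 * performs the equivalent update elsewhere (in real-mode code).
 */
static inline bool kvmppc_try_enter_vcore(struct kvmppc_vcore *vc, int thread)
{
	int old, new;

	do {
		old = READ_ONCE(vc->entry_exit_map);
		if (old >> 8)			/* some thread already exited */
			return false;
		new = old | (1 << thread);	/* set our entry bit */
	} while (cmpxchg(&vc->entry_exit_map, old, new) != old);

	return true;
}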

struct kvmppc_vcpu_book3s {
	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
	struct {
		u64 esid;
		u64 vsid;
	} slb_shadow[64];
	u8 slb_shadow_max;
	struct kvmppc_bat ibat[8];
	struct kvmppc_bat dbat[8];
	u64 hid[6];
	u64 gqr[8];
	u64 sdr1;
	u64 hior;
	u64 msr_mask;
	u64 vtb;
#ifdef CONFIG_PPC_BOOK3S_32
	u32 vsid_pool[VSID_POOL_SIZE];
	u32 vsid_next;
#else
	u64 proto_vsid_first;
	u64 proto_vsid_max;
	u64 proto_vsid_next;
#endif
	int context_id[SID_CONTEXTS];

	bool hior_explicit;		/* HIOR is set by ioctl, not PVR */

	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
#ifdef CONFIG_PPC_BOOK3S_64
	struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
#endif
	int hpte_cache_count;
	spinlock_t mmu_lock;
};

#define VSID_REAL	0x07ffffffffc00000ULL
#define VSID_BAT	0x07ffffffffb00000ULL
#define VSID_64K	0x0800000000000000ULL
#define VSID_1T		0x1000000000000000ULL
#define VSID_REAL_DR	0x2000000000000000ULL
#define VSID_REAL_IR	0x4000000000000000ULL
#define VSID_PR		0x8000000000000000ULL

extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
			       bool iswrite);
extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
			struct kvm_vcpu *vcpu, unsigned long addr,
			unsigned long status);
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
			unsigned long slb_v, unsigned long valid);
extern int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned long gpa, gva_t ea, int is_store);

extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);
extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);

extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run,
			struct kvm_vcpu *vcpu,
			unsigned long ea, unsigned long dsisr);
extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, u64 root,
			u64 *pte_ret_p);
extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, u64 table,
			int table_index, u64 *pte_ret_p);
extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data, bool iswrite);
extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
			unsigned int shift, struct kvm_memory_slot *memslot,
			unsigned int lpid);
extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable,
				    bool writing, unsigned long gpa,
				    unsigned int lpid);
extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
				unsigned long gpa,
				struct kvm_memory_slot *memslot,
				bool writing, bool kvm_ro,
				pte_t *inserted_pte, unsigned int *levelp);
extern int kvmppc_init_vm_radix(struct kvm *kvm);
extern void kvmppc_free_radix(struct kvm *kvm);
extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
				      unsigned int lpid);
extern int kvmppc_radix_init(void);
extern void kvmppc_radix_exit(void);
extern int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			unsigned long gfn);
extern int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			unsigned long gfn);
extern int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			unsigned long gfn);
extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);
extern int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);

/* XXX remove this export when load_last_inst() is generic */
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec);
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
			   bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
			bool writing, bool *writable);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			unsigned long *rmap, long pte_index, int realmode);
extern void kvmppc_update_dirty_map(struct kvm_memory_slot *memslot,
			unsigned long gfn, unsigned long psize);
extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index);
void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index);
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
			unsigned long *nb_ret);
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
			unsigned long gpa, bool dirty);
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel,
			pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret);
extern long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
			struct kvm_memory_slot *memslot,
			unsigned long *map);
extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
			unsigned long mask);
extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);

extern int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu);
extern int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu);
extern void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu);

extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
extern int kvmppc_hcall_impl_pr(unsigned long cmd);
extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
#else
static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
#endif

long kvmhv_nested_init(void);
void kvmhv_nested_exit(void);
void kvmhv_vm_nested_init(struct kvm *kvm);
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu,
			  u64 time_limit, unsigned long lpcr);
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
				   struct hv_guest_state *hr);
long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);

void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);

extern int kvm_irq_bypass;

static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.book3s;
}

/* Also add subarch specific defines */

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
#include <asm/kvm_book3s_32.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_book3s_64.h>
#endif

static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	vcpu->arch.regs.gpr[num] = val;
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	return vcpu->arch.regs.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.regs.ccr = val;
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.ccr;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.xer = val;
}

static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.xer;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.ctr = val;
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.ctr;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.link = val;
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.link;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.nip = val;
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.nip;
}

static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
{
	return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault_dar;
}

static inline bool is_kvmppc_resume_guest(int r)
{
	return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm);
static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
{
	/* Only PR KVM supports the magic page */
	return !is_kvmppc_hv_enabled(vcpu->kvm);
}

extern int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu);
extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);

/* Magic register values loaded into r3 and r4 before the 'sc' assembly
 * instruction for the OSI hypercalls */
#define OSI_SC_MAGIC_R3			0x113724FA
#define OSI_SC_MAGIC_R4			0x77810F9B

#define INS_DCBZ			0x7c0007ec
/* TO = 31 for unconditional trap */
#define INS_TW				0x7fe00008

#define SPLIT_HACK_MASK			0xff000000
#define SPLIT_HACK_OFFS			0xfb000000

/*
 * This packs a VCPU ID from the [0..KVM_MAX_VCPU_ID) space down to the
 * [0..KVM_MAX_VCPUS) space, using knowledge of the guest's core stride
 * (but not its actual threading mode, which is not available) to avoid
 * collisions.
 *
 * The implementation leaves VCPU IDs from the range [0..KVM_MAX_VCPUS)
 * (block 0) unchanged: if the guest is filling each VCORE completely then
 * it will be using consecutive IDs and it will fill the space without any
 * packing.
 *
 * For higher VCPU IDs, the packed ID is based on the VCPU ID modulo
 * KVM_MAX_VCPUS (effectively masking off the top bits) and then an offset
 * is added to avoid collisions.
 *
 * VCPU IDs in the range [KVM_MAX_VCPUS..(KVM_MAX_VCPUS*2)) (block 1) are
 * only possible if the guest is leaving at least 1/2 of each VCORE empty,
 * so IDs can be safely packed into the second half of each VCORE by adding
 * an offset of (stride / 2).
 *
 * Similarly, if VCPU IDs in the range [(KVM_MAX_VCPUS*2)..(KVM_MAX_VCPUS*4))
 * (blocks 2 and 3) are seen, the guest must be leaving at least 3/4 of each
 * VCORE empty so packed IDs can be offset by (stride / 4) and
 * (stride * 3 / 4).
 *
 * Finally, VCPU IDs from blocks 4..7 will only be seen if the guest is
 * using a stride of 8 and 1 thread per core so the remaining offsets of
 * 1, 5, 3 and 7 must be free to use.
 *
 * (The offsets for each block are stored in block_offsets[], indexed by
 * the block number if the stride is 8.  For cases where the guest's stride
 * is less than 8, we can re-use the block_offsets array by multiplying the
 * block number by (MAX_SMT_THREADS / stride) to reach the correct entry.)
 */
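
/*
 * Worked example (the constants are assumed for illustration only): with
 * KVM_MAX_VCPUS = 1024, MAX_SMT_THREADS = 8 and an emulated SMT stride of
 * 4, guest VCPU ID 1026 falls in block (1026 / 1024) * (8 / 4) = 2, so it
 * packs to (1026 % 1024) + block_offsets[2] = 2 + 2 = 4.
 */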
static inline u32 kvmppc_pack_vcpu_id(struct kvm *kvm, u32 id)
{
	const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};
	int stride = kvm->arch.emul_smt_mode;
	int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);
	u32 packed_id;

	if (WARN_ONCE(block >= MAX_SMT_THREADS, "VCPU ID too large to pack"))
		return 0;
	packed_id = (id % KVM_MAX_VCPUS) + block_offsets[block];
	if (WARN_ONCE(packed_id >= KVM_MAX_VCPUS, "VCPU ID packing failed"))
		return 0;
	return packed_id;
}

#endif /* __ASM_KVM_BOOK3S_H__ */