/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_H__
#define __ASM_KVM_BOOK3S_H__

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>

struct kvmppc_bat {
	u64 raw;
	u32 bepi;
	u32 bepi_mask;
	u32 brpn;
	u8 wimg;
	u8 pp;
	bool vs		: 1;
	bool vp		: 1;
};

struct kvmppc_sid_map {
	u64 guest_vsid;
	u64 guest_esid;
	u64 host_vsid;
	bool valid	: 1;
};

#define SID_MAP_BITS	9
#define SID_MAP_NUM	(1 << SID_MAP_BITS)
#define SID_MAP_MASK	(SID_MAP_NUM - 1)

#ifdef CONFIG_PPC_BOOK3S_64
#define SID_CONTEXTS	1
#else
#define SID_CONTEXTS	128
#define VSID_POOL_SIZE	(SID_CONTEXTS * 16)
#endif

struct hpte_cache {
	struct hlist_node list_pte;
	struct hlist_node list_pte_long;
	struct hlist_node list_vpte;
	struct hlist_node list_vpte_long;
	struct rcu_head rcu_head;
	u64 host_vpn;
	u64 pfn;
	ulong slot;
	struct kvmppc_pte pte;
};

struct kvmppc_vcpu_book3s {
	struct kvm_vcpu vcpu;
	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
	struct {
		u64 esid;
		u64 vsid;
	} slb_shadow[64];
	u8 slb_shadow_max;
	struct kvmppc_bat ibat[8];
	struct kvmppc_bat dbat[8];
	u64 hid[6];
	u64 gqr[8];
	u64 sdr1;
	u64 hior;
	u64 msr_mask;
	u64 purr_offset;
	u64 spurr_offset;
#ifdef CONFIG_PPC_BOOK3S_32
	u32 vsid_pool[VSID_POOL_SIZE];
	u32 vsid_next;
#else
	u64 proto_vsid_first;
	u64 proto_vsid_max;
	u64 proto_vsid_next;
#endif
	int context_id[SID_CONTEXTS];

	bool hior_explicit;		/* HIOR is set by ioctl, not PVR */

	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
	int hpte_cache_count;
	spinlock_t mmu_lock;
};

#define CONTEXT_HOST		0
#define CONTEXT_GUEST		1
#define CONTEXT_GUEST_END	2

#define VSID_REAL	0x1fffffffffc00000ULL
#define VSID_BAT	0x1fffffffffb00000ULL
#define VSID_REAL_DR	0x2000000000000000ULL
#define VSID_REAL_IR	0x4000000000000000ULL
#define VSID_PR		0x8000000000000000ULL

extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
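/*
 * Only one of these MMU backends is initialized per vCPU.  A sketch of
 * the (assumed) selection, performed when the guest PVR is set:
 *
 *	if (guest CPU is a 64-bit Book3S implementation)
 *		kvmppc_mmu_book3s_64_init(vcpu);
 *	else
 *		kvmppc_mmu_book3s_32_init(vcpu);
 *
 * kvmppc_mmu_book3s_hv_init() below is used by the HV backend instead.
 */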
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
			struct kvm_vcpu *vcpu, unsigned long addr,
			unsigned long status);
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
			unsigned long slb_v, unsigned long valid);

extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
			   bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			unsigned long *rmap, long pte_index, int realmode);
extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
			unsigned long pte_index);
extern void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
			unsigned long pte_index);
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
			unsigned long *nb_ret);
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr);
extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel);
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel,
			pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret);
extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);

extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
extern void kvmppc_load_up_fpu(void);
extern void kvmppc_load_up_altivec(void);
extern void kvmppc_load_up_vsx(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);

static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
}

extern void kvm_return_point(void);
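/*
 * Usage sketch (illustrative, not part of the original header):
 * to_book3s() above recovers the containing Book3S state from the
 * embedded generic vCPU via container_of(), e.g.:
 *
 *	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 *	u64 hior = vcpu_book3s->hior;
 */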
/* Also add subarch specific defines */

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
#include <asm/kvm_book3s_32.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_book3s_64.h>
#endif

#ifdef CONFIG_KVM_BOOK3S_PR

static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	return to_book3s(vcpu)->hior;
}

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (pending_now)
		vcpu->arch.shared->int_pending = 1;
	else if (old_pending)
		vcpu->arch.shared->int_pending = 0;
}

static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	if (num < 14) {
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		svcpu->gpr[num] = val;
		svcpu_put(svcpu);
		to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
	} else
		vcpu->arch.gpr[num] = val;
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	if (num < 14) {
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		ulong r = svcpu->gpr[num];
		svcpu_put(svcpu);
		return r;
	} else
		return vcpu->arch.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	svcpu->cr = val;
	svcpu_put(svcpu);
	to_book3s(vcpu)->shadow_vcpu->cr = val;
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	u32 r;
	r = svcpu->cr;
	svcpu_put(svcpu);
	return r;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	svcpu->xer = val;
	svcpu_put(svcpu);
	to_book3s(vcpu)->shadow_vcpu->xer = val;
}

static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	u32 r;
	r = svcpu->xer;
	svcpu_put(svcpu);
	return r;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	svcpu->ctr = val;
	svcpu_put(svcpu);
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	ulong r;
	r = svcpu->ctr;
	svcpu_put(svcpu);
	return r;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	svcpu->lr = val;
	svcpu_put(svcpu);
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	ulong r;
	r = svcpu->lr;
	svcpu_put(svcpu);
	return r;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	svcpu->pc = val;
	svcpu_put(svcpu);
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	ulong r;
	r = svcpu->pc;
	svcpu_put(svcpu);
	return r;
}

static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
	ulong pc = kvmppc_get_pc(vcpu);
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
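	/* Note: svcpu_get() must be balanced by the svcpu_put() below.
	 * The shadow vcpu caches the last guest instruction word seen on
	 * exit, or KVM_INST_FETCH_FAILED if it could not be fetched. */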
	u32 r;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);

	r = svcpu->last_inst;
	svcpu_put(svcpu);
	return r;
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	ulong r;
	r = svcpu->fault_dar;
	svcpu_put(svcpu);
	return r;
}

static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	return crit;
}
#else /* CONFIG_KVM_BOOK3S_PR */

static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
}

static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	vcpu->arch.gpr[num] = val;
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	return vcpu->arch.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.cr = val;
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.xer = val;
}

static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.xer;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.ctr = val;
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.ctr;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.lr = val;
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.lr;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.pc = val;
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pc;
}

static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
	ulong pc = kvmppc_get_pc(vcpu);

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);

	return vcpu->arch.last_inst;
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault_dar;
}

static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	return false;
}
#endif /* CONFIG_KVM_BOOK3S_PR */

/* Magic register values loaded into r3 and r4 before the 'sc' assembly
 * instruction for the OSI hypercalls */
#define OSI_SC_MAGIC_R3			0x113724FA
#define OSI_SC_MAGIC_R4			0x77810F9B

#define INS_DCBZ			0x7c0007ec
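/*
 * Illustrative check (an assumption, not code from this header): an exit
 * handler can distinguish an OSI hypercall from a regular guest syscall
 * by comparing the guest's r3/r4 against the OSI_SC_MAGIC_* values above:
 *
 *	if (kvmppc_get_gpr(vcpu, 3) == OSI_SC_MAGIC_R3 &&
 *	    kvmppc_get_gpr(vcpu, 4) == OSI_SC_MAGIC_R4)
 *		... treat as an OSI hypercall ...
 */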
/* LPIDs we support with this build -- runtime limit may be lower */
#define KVMPPC_NR_LPIDS			(LPID_RSVD + 1)

#endif /* __ASM_KVM_BOOK3S_H__ */