/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#include <asm/xive.h>
#include <asm/cpu_has_feature.h>
#endif

/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used to support
 * software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_AGAIN,		/* something went wrong; try again */
	EMULATE_EXIT_USER,	/* emulation requires exit to user-space */
};

enum instruction_fetch_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
				  unsigned int rt, unsigned int bytes,
				  int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_store(struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
				   int rs, unsigned int bytes,
				   int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_fetch_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);
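/*
 * Illustrative sketch (not a definition from this header): callers of the
 * emulation entry points above typically dispatch on enum emulation_result,
 * e.g.:
 *
 *	switch (kvmppc_emulate_instruction(vcpu)) {
 *	case EMULATE_DONE:
 *		r = RESUME_GUEST;	// instruction handled, re-enter guest
 *		break;
 *	case EMULATE_DO_MMIO:
 *		r = RESUME_HOST;	// kvm_run now holds an MMIO request
 *		break;
 *	case EMULATE_FAIL:
 *		// e.g. inject a program interrupt into the guest
 *		break;
 *	}
 *
 * RESUME_GUEST/RESUME_HOST are the usual exit-handler return codes; the
 * exact policy is up to the caller.
 */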
/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
			   unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
			      gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);

extern int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
				      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags,
					   ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
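/*
 * Usage note (illustrative, not a new API): emulation code typically
 * reports an illegal instruction to the guest by queueing a program
 * interrupt, e.g.:
 *
 *	kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
 */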
extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp);
extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
extern void kvmppc_setup_partition_table(struct kvm *kvm);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
		struct kvm *kvm, unsigned long liobn);
#define kvmppc_ioba_validate(stt, ioba, npages)                         \
		(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
				(stt)->size, (ioba), (npages)) ?        \
				H_PARAMETER : H_SUCCESS)
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
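/*
 * Illustrative sketch (hypothetical handler, not part of this header): an
 * H_PUT_TCE-style hypercall handler is expected to validate the I/O bus
 * address before touching the table, e.g.:
 *
 *	struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(kvm, liobn);
 *	long ret;
 *
 *	if (!stt)
 *		return H_TOO_HARD;
 *	ret = kvmppc_ioba_validate(stt, ioba, 1);
 *	if (ret != H_SUCCESS)
 *		return ret;
 */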
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *slot);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				const struct kvm_memory_slot *old,
				struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
					    struct kvm_ppc_resize_hpt *rhpt);
extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
					   struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);

extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	u32	vsx32val[4];
	u16	vsx16val[8];
	u8	vsx8val[16];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
	u64	xive_timaval[2];
};

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_vcpu *vcpu);
	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     const struct kvm_memory_slot *old,
				     struct kvm_memory_slot *new,
				     enum kvm_mr_change change);
	void (*commit_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new,
				     enum kvm_mr_change change);
	bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*test_age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*set_spte_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	void (*free_memslot)(struct kvm_memory_slot *slot);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
				       struct irq_bypass_producer *);
	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
					struct irq_bypass_producer *);
	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
	int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
			    unsigned long flags);
	void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
	int (*enable_nested)(struct kvm *kvm);
	int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			       int size);
	int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			      int size);
	int (*enable_svm)(struct kvm *kvm);
	int (*svm_off)(struct kvm *kvm);
	int (*enable_dawr1)(struct kvm *kvm);
	bool (*hash_v3_possible)(void);
	int (*create_vm_debugfs)(struct kvm *kvm);
	int (*create_vcpu_debugfs)(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;
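/*
 * Illustrative note: kvmppc_hv_ops and kvmppc_pr_ops are filled in by the
 * HV and PR implementations respectively, and a VM dispatches through the
 * ops table selected at VM creation, e.g. (sketch):
 *
 *	vcpu->kvm->arch.kvm_ops->vcpu_run(vcpu);
 */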
static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
				       enum instruction_fetch_type type,
				       u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
				swab32(vcpu->arch.last_inst) :
				vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

/*
 * Cuts out inst bits with ordering according to spec.
 * That means the leftmost bit is zero. All given bits are included.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}

/*
 * Replaces inst bits with ordering according to spec.
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}

#define one_reg_size(id)	\
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})

#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})
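/*
 * Worked example (illustrative): the bit numbering above follows the
 * Power ISA convention where bit 0 is the most-significant bit of the
 * 64-bit doubleword, so
 *
 *	kvmppc_get_field(inst, 58, 63)
 *
 * returns the six least-significant bits of inst. Similarly, a ONE_REG
 * accessor would typically pair the macros above with the register id:
 *
 *	val = get_reg_val(reg->id, some_u64_register);	// pack into union
 *	some_u64_register = set_reg_val(reg->id, val);	// unpack from union
 *
 * (some_u64_register is a placeholder, not a field defined here.)
 */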
int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{
	paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
	paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

/*
 * To avoid unnecessarily exiting fully to the host kernel, an IPI to
 * a CPU thread that's running/napping inside of a guest is by default
 * regarded as a request to wake the CPU (if needed) and continue execution
 * within the guest, potentially to process new state like externally-generated
 * interrupts or IPIs sent from within the guest itself (e.g. H_PROD/H_IPI).
 *
 * To force an exit to the host kernel, kvmppc_set_host_ipi() must be called
 * prior to issuing the IPI to set the corresponding 'host_ipi' flag in the
 * target CPU's PACA. To avoid unnecessary exits to the host, this flag should
 * be immediately cleared via kvmppc_clear_host_ipi() by the IPI handler on
 * the receiving side prior to processing the IPI work.
 *
 * NOTE:
 *
 * We currently issue an smp_mb() at the beginning of kvmppc_set_host_ipi().
 * This is to guard against sequences such as the following:
 *
 *      CPU
 *        X: smp_muxed_ipi_set_message():
 *        X:   smp_mb()
 *        X:   message[RESCHEDULE] = 1
 *        X: doorbell_global_ipi(42):
 *        X:   kvmppc_set_host_ipi(42)
 *        X:   ppc_msgsnd_sync()/smp_mb()
 *        X:   ppc_msgsnd() -> 42
 *       42: doorbell_exception(): // from CPU X
 *       42:   ppc_msgsync()
 *      105: smp_muxed_ipi_set_message():
 *      105:   smp_mb()
 *           // STORE DEFERRED DUE TO RE-ORDERING
 *    --105:   message[CALL_FUNCTION] = 1
 *    | 105: doorbell_global_ipi(42):
 *    | 105:   kvmppc_set_host_ipi(42)
 *    |  42:   kvmppc_clear_host_ipi(42)
 *    |  42: smp_ipi_demux_relaxed()
 *    |  42: // returns to executing guest
 *    |      // RE-ORDERED STORE COMPLETES
 *    ->105:   message[CALL_FUNCTION] = 1
 *      105:   ppc_msgsnd_sync()/smp_mb()
 *      105:   ppc_msgsnd() -> 42
 *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
 *      105: // hangs waiting on 42 to process messages/call_single_queue
 *
 * We also issue an smp_mb() at the end of kvmppc_clear_host_ipi(). This is
 * to guard against sequences such as the following (as well as to create
 * a read-side pairing with the barrier in kvmppc_set_host_ipi()):
 *
 *      CPU
 *        X: smp_muxed_ipi_set_message():
 *        X:   smp_mb()
 *        X:   message[RESCHEDULE] = 1
 *        X: doorbell_global_ipi(42):
 *        X:   kvmppc_set_host_ipi(42)
 *        X:   ppc_msgsnd_sync()/smp_mb()
 *        X:   ppc_msgsnd() -> 42
 *       42: doorbell_exception(): // from CPU X
 *       42:   ppc_msgsync()
 *           // STORE DEFERRED DUE TO RE-ORDERING
 *    -- 42:   kvmppc_clear_host_ipi(42)
 *    |  42: smp_ipi_demux_relaxed()
 *    | 105: smp_muxed_ipi_set_message():
 *    | 105:   smp_mb()
 *    | 105:   message[CALL_FUNCTION] = 1
 *    | 105: doorbell_global_ipi(42):
 *    | 105:   kvmppc_set_host_ipi(42)
 *    |      // RE-ORDERED STORE COMPLETES
 *    -> 42:   kvmppc_clear_host_ipi(42)
 *       42: // returns to executing guest
 *      105:   ppc_msgsnd_sync()/smp_mb()
 *      105:   ppc_msgsnd() -> 42
 *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
 *      105: // hangs waiting on 42 to process messages/call_single_queue
 */
static inline void kvmppc_set_host_ipi(int cpu)
{
	/*
	 * order stores of IPI messages vs. setting of host_ipi flag
	 *
	 * pairs with the barrier in kvmppc_clear_host_ipi()
	 */
	smp_mb();
	paca_ptrs[cpu]->kvm_hstate.host_ipi = 1;
}

static inline void kvmppc_clear_host_ipi(int cpu)
{
	paca_ptrs[cpu]->kvm_hstate.host_ipi = 0;
	/*
	 * order clearing of host_ipi flag vs. processing of IPI messages
	 *
	 * pairs with the barrier in kvmppc_set_host_ipi()
	 */
	smp_mb();
}
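/*
 * Illustrative pairing (the real senders live in the doorbell/XICS code):
 *
 *	// sender: force the target CPU to exit to the host
 *	kvmppc_set_host_ipi(cpu);
 *	// ... then cause an IPI to be sent to cpu ...
 *
 *	// receiver, in its IPI handler, before doing the IPI work:
 *	kvmppc_clear_host_ipi(smp_processor_id());
 */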
static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu)
{}

static inline void kvmppc_clear_host_ipi(int cpu)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)	{ return false; }

#endif

#ifdef CONFIG_PPC_PSERIES
static inline bool kvmhv_on_pseries(void)
{
	return !cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool kvmhv_on_pseries(void)
{
	return false;
}
#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
{
	if (kvm && kvm_irq_bypass)
		return kvm->arch.pimap;
	return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
					struct kvmppc_irq_map *irq_map,
					struct kvmppc_passthru_irqmap *pimap,
					bool *again);

extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);

extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
	{ return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {}
static inline void kvmppc_free_host_rm_ops(void) {}
static inline void kvmppc_free_pimap(struct kvm *kvm) {}
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
	{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
static inline int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
	{ return 0; }
#endif

#ifdef CONFIG_KVM_XIVE
/*
 * Below, the first "xive" is the "eXternal Interrupt Virtualization Engine",
 * i.e. the new P9 interrupt controller, while the second "xive" is the
 * legacy "eXternal Interrupt Vector Entry", which is the configuration of an
 * interrupt on the "xics" interrupt controller on P8 and earlier. These
 * two functions consume or produce a legacy "XIVE" state from the
 * new "XIVE" interrupt controller.
 */
extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);

extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				  unsigned long host_irq);
extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				  unsigned long host_irq);
extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);

extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XIVE;
}

extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern bool kvmppc_xive_native_supported(void);

#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				       u32 priority) { return -1; }
static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				       u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }

static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
			struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
			struct irq_desc *host_desc) { return -ENODEV; }
static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
			struct irq_desc *host_desc) { return -ENODEV; }
static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; }

static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
				      int level, bool line_status) { return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
static inline void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { }
static inline void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { }

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
			struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
{ return 0; }
static inline int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
{ return -ENOENT; }

#endif /* CONFIG_KVM_XIVE */

#if defined(CONFIG_PPC_POWERNV) && defined(CONFIG_KVM_BOOK3S_64_HANDLER)
static inline bool xics_on_xive(void)
{
	return xive_enabled() && cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool xics_on_xive(void)
{
	return false;
}
#endif

/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			 unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				  unsigned long liobn, unsigned long ioba,
				  unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
			   unsigned long liobn, unsigned long ioba,
			   unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count);
long kvmppc_rm_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_p9_realmode_hmi_handler(struct kvm_vcpu *vcpu);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
			   unsigned long dest, unsigned long src);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);
/*
 * Host-side operations we want to set up while the guest runs in real
 * mode and operates on the XICS.
 * Currently only VCPU wakeup is supported.
 */

union kvmppc_rm_state {
	unsigned long raw;
	struct {
		u32 in_host;
		u32 rm_action;
	};
};

struct kvmppc_host_rm_core {
	union kvmppc_rm_state rm_state;
	void *rm_data;
	char pad[112];
};

struct kvmppc_host_rm_ops {
	struct kvmppc_host_rm_core	*rm_core;
	void		(*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
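/*
 * Usage note (illustrative, an inference from the layout): the 'raw'
 * member of union kvmppc_rm_state overlays the in_host/rm_action pair so
 * both fields can be snapshotted with a single word-sized access, e.g.
 * (sketch, 'core' being a hypothetical index into rm_core[]):
 *
 *	union kvmppc_rm_state old;
 *	old.raw = READ_ONCE(kvmppc_host_rm_ops_hv->rm_core[core].rm_state.raw);
 */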
static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
				struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
		struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct page *page;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	page = pfn_to_page(pfn);
	if (!test_bit(PG_dcache_clean, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_dcache_clean, &page->flags);
	}
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness, so expose helpers that handle both.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);					\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
		return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
		vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

#define SPRNG_WRAPPER(reg, bookehv_spr)					\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SPRNG_WRAPPER(reg, bookehv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif

SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->msr = cpu_to_be64(val);
	else
		vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
		return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
		return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
		vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}
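/*
 * Expansion example (for reference): SHARED_WRAPPER(critical, 64) above
 * generates the pair
 *
 *	static inline u64 kvmppc_get_critical(struct kvm_vcpu *vcpu);
 *	static inline void kvmppc_set_critical(struct kvm_vcpu *vcpu, u64 val);
 *
 * which byte-swap vcpu->arch.shared->critical as needed, while under
 * CONFIG_KVM_BOOKE_HV the SHARED_SPRNG_WRAPPER() registers (e.g. srr0)
 * become direct mfspr()/mtspr() accesses to the guest SPR instead.
 */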
/*
 * Call this after prepare_to_enter: it puts the lazy-EE and IRQ-disabled
 * tracking state back to normal mode, without actually enabling
 * interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	irq_soft_mask_set(IRQS_ENABLED);
#endif
}

static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */