/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#endif

enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
	EMULATE_FAIL,         /* can't emulate this instruction */
	EMULATE_AGAIN,        /* something went wrong; retry the instruction */
	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};

enum instruction_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                              unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                               unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
                                      struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(unsigned long data);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
                           unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
                              gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);

extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
                                                unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
                                      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags,
					   ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp);
extern void kvmppc_free_hpt(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				struct kvm_create_spapr_tce *args);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct kvm_rma_info *kvm_alloc_rma(void);
extern void kvm_release_rma(struct kvm_rma_info *ri);
extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *free,
				     struct kvm_memory_slot *dont);
extern int kvmppc_core_create_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *slot,
				      unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
};

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *memslot,
				     struct kvm_userspace_memory_region *mem);
	void (*commit_memory_region)(struct kvm *kvm,
				     struct kvm_userspace_memory_region *mem,
				     const struct kvm_memory_slot *old);
	int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
			   unsigned long end);
	int (*age_hva)(struct kvm *kvm, unsigned long hva);
	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
	void (*free_memslot)(struct kvm_memory_slot *free,
			     struct kvm_memory_slot *dont);
	int (*create_memslot)(struct kvm_memory_slot *slot,
			      unsigned long npages);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
					enum instruction_type type, u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if the exit path failed to do so */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
				swab32(vcpu->arch.last_inst) :
				vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}
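
/*
 * Minimal usage sketch (illustrative only, not an API defined here): an
 * emulation handler would typically fetch the trapping instruction and
 * propagate any non-EMULATE_DONE status back to its caller, e.g.
 *
 *	u32 inst;
 *	int ret = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
 *
 *	if (ret != EMULATE_DONE)
 *		return ret;
 */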

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

/*
 * Extracts instruction bits numbered according to the spec (IBM MSB-0
 * ordering, i.e. the leftmost bit is bit 0). All bits from msb through lsb
 * inclusive are returned.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}

/*
 * Replaces inst bits numbered according to the spec (IBM MSB-0 ordering).
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}
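
/*
 * Example (illustrative): with a 32-bit instruction held in the low word of
 * 'inst', doubleword bit positions 32..37 (IBM MSB-0 numbering) hold the
 * primary opcode, so
 *
 *	u32 op = kvmppc_get_field(inst, 32, 37);
 *
 * is equivalent to (inst >> 26) & 0x3f.
 */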

#define one_reg_size(id)	\
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})


#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})
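
/*
 * Usage sketch (illustrative; 'foo' stands in for a hypothetical vcpu field):
 * a get_one_reg() handler builds its reply sized according to the ONE_REG id,
 *
 *	*val = get_reg_val(id, vcpu->arch.foo);
 *
 * and a set_one_reg() handler unpacks the value the same way:
 *
 *	vcpu->arch.foo = set_reg_val(id, *val);
 */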

int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca[cpu].kvm_hstate.xics_phys = addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{
	paca[cpu].kvm_hstate.host_ipi = host_ipi;
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)		{ return false; }

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server);
extern int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
			struct kvm_vcpu *vcpu, u32 cpu);
#else
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu,
					 unsigned long server)
	{ return -EINVAL; }
static inline int kvm_vm_ioctl_xics_irq(struct kvm *kvm,
					struct kvm_irq_level *args)
	{ return -ENOTTY; }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
#endif

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
		struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
		struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
{
	struct page *page;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	page = pfn_to_page(pfn);
	if (!test_bit(PG_arch_1, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_arch_1, &page->flags);
	}
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness, so provide accessors that handle
 * either byte order.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);						\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
	       return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
	       return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
	       vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
	       vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

#define SPRNG_WRAPPER(reg, bookehv_spr)					\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SPRNG_WRAPPER(reg, bookehv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif

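/*
 * Illustrative note: each invocation below generates a get/set accessor pair.
 * For example, SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0) produces
 * kvmppc_get_srr0()/kvmppc_set_srr0(), which on CONFIG_KVM_BOOKE_HV access
 * the guest SPR directly via mfspr/mtspr(SPRN_GSRR0), and otherwise access
 * vcpu->arch.shared->srr0 using the byte order reported by
 * kvmppc_shared_big_endian().
 */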
SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
	       vcpu->arch.shared->msr = cpu_to_be64(val);
	else
	       vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
	       return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
	       return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
	       vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
	       vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Call this after prepare_to_enter. It puts the lazy EE and IRQ-disabled
 * tracking state back to normal mode, without actually enabling interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	local_paca->soft_enabled = 1;
#endif
}

static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}
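
/*
 * Illustrative example: when emulating an X-form access such as lwzx rt,ra,rb,
 * the effective address is (ra ? GPR[ra] : 0) + GPR[rb], truncated to 32 bits
 * when the guest is not in 64-bit mode; kvmppc_get_ea_indexed(vcpu, ra, rb)
 * computes exactly that.
 */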

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */