/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#endif

/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used to
 * implement software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00

enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
	EMULATE_FAIL,         /* can't emulate this instruction */
	EMULATE_AGAIN,        /* something went wrong; try again */
	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};
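
/*
 * Illustrative sketch (not part of this header): callers typically
 * dispatch on the returned emulation_result roughly as below. The
 * resume codes and the program-check fallback are assumptions here,
 * not a definitive implementation.
 *
 *	switch (kvmppc_emulate_instruction(run, vcpu)) {
 *	case EMULATE_DONE:
 *		r = RESUME_GUEST;	// instruction handled, re-enter guest
 *		break;
 *	case EMULATE_AGAIN:
 *		r = RESUME_GUEST;	// retry, e.g. the fetch must be redone
 *		break;
 *	case EMULATE_DO_MMIO:
 *	case EMULATE_EXIT_USER:
 *		r = RESUME_HOST;	// let userspace complete the access
 *		break;
 *	case EMULATE_FAIL:
 *	default:
 *		kvmppc_core_queue_program(vcpu, 0);	// or report failure
 *		r = RESUME_GUEST;
 *		break;
 *	}
 */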

enum instruction_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                              unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                               unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
                                      struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
                           unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
                              gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);

extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
                                                unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
                                      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags,
					   ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp);
extern void kvmppc_free_hpt(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				struct kvm_create_spapr_tce *args);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *free,
				     struct kvm_memory_slot *dont);
extern int kvmppc_core_create_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *slot,
				      unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
};

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *memslot,
				     struct kvm_userspace_memory_region *mem);
	void (*commit_memory_region)(struct kvm *kvm,
				     struct kvm_userspace_memory_region *mem,
				     const struct kvm_memory_slot *old);
	int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
			   unsigned long end);
	int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
	void (*free_memslot)(struct kvm_memory_slot *free,
			     struct kvm_memory_slot *dont);
	int (*create_memslot)(struct kvm_memory_slot *slot,
			      unsigned long npages);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
					enum instruction_type type, u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if the exit path failed to
	 * fetch it */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
				swab32(vcpu->arch.last_inst) :
				vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}
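
/*
 * Illustrative usage (a minimal sketch, not from this file): fetch the
 * last-executed instruction before emulating it, retrying the exit if
 * the fetch itself could not complete.
 *
 *	u32 inst;
 *
 *	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
 *		return EMULATE_AGAIN;
 *	// "inst" now holds the opcode in host byte order; decode it.
 */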

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

/*
 * Extracts inst bits, numbered as in the Power ISA specification:
 * the leftmost (most significant) bit is bit 0. Both msb and lsb
 * are included in the extracted field.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}
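
/*
 * Worked example (illustrative): for a 32-bit instruction held in the
 * low word of a u64, instruction bits 6-10 sit at bits 38-42 of the
 * 64-bit value, so
 *
 *	rt = kvmppc_get_field(inst, 32 + 6, 32 + 10);
 *
 * evaluates to (inst >> 21) & 0x1f, i.e. the RT field of a D-form
 * load or store.
 */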

/*
 * Replaces inst bits, using the same bit-numbering convention.
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}

#define one_reg_size(id)	\
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})

#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})
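
/*
 * Illustrative sketch (assumed get_one_reg/set_one_reg case labels):
 * these macros convert between a raw register value and the access size
 * encoded in the ONE_REG id, e.g.
 *
 *	case KVM_REG_PPC_DAR:
 *		*val = get_reg_val(id, kvmppc_get_dar(vcpu));	// get path
 *		break;
 *	...
 *	case KVM_REG_PPC_DAR:
 *		kvmppc_set_dar(vcpu, set_reg_val(id, *val));	// set path
 *		break;
 *
 * kvmppc_get_dar()/kvmppc_set_dar() are generated further down in this
 * header by SHARED_SPRNG_WRAPPER(dar, ...).
 */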

int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca[cpu].kvm_hstate.xics_phys = addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{
	paca[cpu].kvm_hstate.host_ipi = host_ipi;
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)		{ return false; }

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server);
extern int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
			struct kvm_vcpu *vcpu, u32 cpu);
#else
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu,
					 unsigned long server)
	{ return -EINVAL; }
static inline int kvm_vm_ioctl_xics_irq(struct kvm *kvm,
					struct kvm_irq_level *args)
	{ return -ENOTTY; }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
#endif

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
		struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
		struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
{
	struct page *page;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	page = pfn_to_page(pfn);
	if (!test_bit(PG_arch_1, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_arch_1, &page->flags);
	}
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness, so expose endian-aware accessors
 * for all of its fields.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);					\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
	       return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
	       return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
	       vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
	       vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

#define SPRNG_WRAPPER(reg, bookehv_spr)					\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SPRNG_WRAPPER(reg, bookehv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif
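
/*
 * Illustrative expansion: each instantiation below emits a getter/setter
 * pair. Without CONFIG_KVM_BOOKE_HV, SHARED_SPRNG_WRAPPER(srr0, 64,
 * SPRN_GSRR0) expands to roughly
 *
 *	static inline u64 kvmppc_get_srr0(struct kvm_vcpu *vcpu)
 *	{
 *		if (kvmppc_shared_big_endian(vcpu))
 *			return be64_to_cpu(vcpu->arch.shared->srr0);
 *		else
 *			return le64_to_cpu(vcpu->arch.shared->srr0);
 *	}
 *
 * plus the matching kvmppc_set_srr0(). With CONFIG_KVM_BOOKE_HV the same
 * line instead produces mfspr(SPRN_GSRR0)/mtspr(SPRN_GSRR0, val) wrappers.
 */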

SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
	       vcpu->arch.shared->msr = cpu_to_be64(val);
	else
	       vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
	       return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
	       return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
	       vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
	       vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Call this after prepare_to_enter. It puts the lazy-EE and
 * IRQ-disabled tracking state back to normal mode, without actually
 * enabling interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	local_paca->soft_enabled = 1;
#endif
}

static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}
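
/*
 * Illustrative usage (get_ra/get_rb are decode helpers assumed here, not
 * declared in this header): when emulating an X-form access such as lwzx,
 * the RA/RB fields of the instruction feed straight in, and the helper
 * truncates the result to 32 bits when the guest is not in 64-bit mode.
 *
 *	ulong ea = kvmppc_get_ea_indexed(vcpu, get_ra(inst), get_rb(inst));
 */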

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */