xref: /openbmc/linux/arch/powerpc/include/asm/kvm_ppc.h (revision a36954f5)
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License, version 2, as
4  * published by the Free Software Foundation.
5  *
6  * This program is distributed in the hope that it will be useful,
7  * but WITHOUT ANY WARRANTY; without even the implied warranty of
8  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
9  * GNU General Public License for more details.
10  *
11  * You should have received a copy of the GNU General Public License
12  * along with this program; if not, write to the Free Software
13  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
14  *
15  * Copyright IBM Corp. 2008
16  *
17  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18  */
19 
20 #ifndef __POWERPC_KVM_PPC_H__
21 #define __POWERPC_KVM_PPC_H__
22 
23 /* This file exists just so we can dereference kvm_vcpu, avoiding nested header
24  * dependencies. */
25 
26 #include <linux/mutex.h>
27 #include <linux/timer.h>
28 #include <linux/types.h>
29 #include <linux/kvm_types.h>
30 #include <linux/kvm_host.h>
31 #include <linux/bug.h>
32 #ifdef CONFIG_PPC_BOOK3S
33 #include <asm/kvm_book3s.h>
34 #else
35 #include <asm/kvm_booke.h>
36 #endif
37 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
38 #include <asm/paca.h>
39 #endif
40 
41 /*
42  * KVMPPC_INST_SW_BREAKPOINT is the debug instruction
43  * used to implement software breakpoints.
44  */
45 #define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00
46 
47 enum emulation_result {
48 	EMULATE_DONE,         /* no further processing */
49 	EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
50 	EMULATE_FAIL,         /* can't emulate this instruction */
51 	EMULATE_AGAIN,        /* something went wrong; retry the emulation */
52 	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
53 };
54 
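/*
 * Illustrative sketch (not part of this header): callers of the emulation
 * entry points typically dispatch on these result codes roughly as below;
 * RESUME_GUEST/RESUME_HOST come from asm/kvm_host.h and the exact handling
 * lives in the core-specific run loops.
 *
 *	switch (kvmppc_emulate_instruction(run, vcpu)) {
 *	case EMULATE_DONE:
 *	case EMULATE_AGAIN:
 *		r = RESUME_GUEST;
 *		break;
 *	case EMULATE_DO_MMIO:
 *	case EMULATE_EXIT_USER:
 *		r = RESUME_HOST;
 *		break;
 *	case EMULATE_FAIL:
 *	default:
 *		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 *		r = RESUME_HOST;
 *		break;
 *	}
 */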
55 enum instruction_type {
56 	INST_GENERIC,
57 	INST_SC,		/* system call */
58 };
59 
60 enum xlate_instdata {
61 	XLATE_INST,		/* translate instruction address */
62 	XLATE_DATA		/* translate data address */
63 };
64 
65 enum xlate_readwrite {
66 	XLATE_READ,		/* check for read permissions */
67 	XLATE_WRITE		/* check for write permissions */
68 };
69 
70 extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
71 extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
72 extern void kvmppc_handler_highmem(void);
73 
74 extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
75 extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
76                               unsigned int rt, unsigned int bytes,
77 			      int is_default_endian);
78 extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
79                                unsigned int rt, unsigned int bytes,
80 			       int is_default_endian);
81 extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
82 				unsigned int rt, unsigned int bytes,
83 			int is_default_endian, int mmio_sign_extend);
84 extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
85 			       u64 val, unsigned int bytes,
86 			       int is_default_endian);
87 extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
88 				int rs, unsigned int bytes,
89 				int is_default_endian);
90 
91 extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
92 				 enum instruction_type type, u32 *inst);
93 
94 extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
95 		     bool data);
96 extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
97 		     bool data);
98 extern int kvmppc_emulate_instruction(struct kvm_run *run,
99                                       struct kvm_vcpu *vcpu);
100 extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
101 extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
102 extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
103 extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
104 extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
105 extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
106 extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
107 extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);
108 
109 /* Core-specific hooks */
110 
111 extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
112                            unsigned int gtlb_idx);
113 extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
114 extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
115 extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
116 extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
117 extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
118 extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
119 extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
120                               gva_t eaddr);
121 extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
122 extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
123 extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
124 			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
125 			struct kvmppc_pte *pte);
126 
127 extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
128                                                 unsigned int id);
129 extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
130 extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
131 extern int kvmppc_core_check_processor_compat(void);
132 extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
133                                       struct kvm_translation *tr);
134 
135 extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
136 extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
137 
138 extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
139 extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
140 extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
141 extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
142 extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
143 extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
144 extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
145 extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
146 extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
147                                        struct kvm_interrupt *irq);
148 extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
149 extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
150 					ulong esr_flags);
151 extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
152 					   ulong dear_flags,
153 					   ulong esr_flags);
154 extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
155 extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
156 					   ulong esr_flags);
157 extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
158 extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
159 
160 extern int kvmppc_booke_init(void);
161 extern void kvmppc_booke_exit(void);
162 
163 extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
164 extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
165 extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);
166 
167 extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
168 extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
169 extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
170 extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
171 extern long kvmppc_prepare_vrma(struct kvm *kvm,
172 				struct kvm_userspace_memory_region *mem);
173 extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
174 			struct kvm_memory_slot *memslot, unsigned long porder);
175 extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
176 extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
177 		struct iommu_group *grp);
178 extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
179 		struct iommu_group *grp);
180 
181 extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
182 				struct kvm_create_spapr_tce_64 *args);
183 extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
184 		struct kvm *kvm, unsigned long liobn);
185 #define kvmppc_ioba_validate(stt, ioba, npages)                         \
186 		(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
187 				(stt)->size, (ioba), (npages)) ?        \
188 				H_PARAMETER : H_SUCCESS)
189 extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt,
190 		unsigned long tce);
191 extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
192 		unsigned long *ua, unsigned long **prmap);
193 extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
194 		unsigned long idx, unsigned long tce);
195 extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
196 			     unsigned long ioba, unsigned long tce);
197 extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
198 		unsigned long liobn, unsigned long ioba,
199 		unsigned long tce_list, unsigned long npages);
200 extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
201 		unsigned long liobn, unsigned long ioba,
202 		unsigned long tce_value, unsigned long npages);
203 extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
204 			     unsigned long ioba);
205 extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
206 extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
207 extern int kvmppc_core_init_vm(struct kvm *kvm);
208 extern void kvmppc_core_destroy_vm(struct kvm *kvm);
209 extern void kvmppc_core_free_memslot(struct kvm *kvm,
210 				     struct kvm_memory_slot *free,
211 				     struct kvm_memory_slot *dont);
212 extern int kvmppc_core_create_memslot(struct kvm *kvm,
213 				      struct kvm_memory_slot *slot,
214 				      unsigned long npages);
215 extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
216 				struct kvm_memory_slot *memslot,
217 				const struct kvm_userspace_memory_region *mem);
218 extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
219 				const struct kvm_userspace_memory_region *mem,
220 				const struct kvm_memory_slot *old,
221 				const struct kvm_memory_slot *new);
222 extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
223 				      struct kvm_ppc_smmu_info *info);
224 extern void kvmppc_core_flush_memslot(struct kvm *kvm,
225 				      struct kvm_memory_slot *memslot);
226 
227 extern int kvmppc_bookehv_init(void);
228 extern void kvmppc_bookehv_exit(void);
229 
230 extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);
231 
232 extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
233 extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
234 					    struct kvm_ppc_resize_hpt *rhpt);
235 extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
236 					   struct kvm_ppc_resize_hpt *rhpt);
237 
238 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);
239 
240 extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
241 extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
242 extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
243 
244 extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
245 				u32 priority);
246 extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
247 				u32 *priority);
248 extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
249 extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);
250 
251 void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
252 void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);
253 
254 union kvmppc_one_reg {
255 	u32	wval;
256 	u64	dval;
257 	vector128 vval;
258 	u64	vsxval[2];
259 	u32	vsx32val[4];
260 	struct {
261 		u64	addr;
262 		u64	length;
263 	}	vpaval;
264 };
265 
266 struct kvmppc_ops {
267 	struct module *owner;
268 	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
269 	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
270 	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
271 			   union kvmppc_one_reg *val);
272 	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
273 			   union kvmppc_one_reg *val);
274 	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
275 	void (*vcpu_put)(struct kvm_vcpu *vcpu);
276 	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
277 	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
278 	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
279 	void (*vcpu_free)(struct kvm_vcpu *vcpu);
280 	int (*check_requests)(struct kvm_vcpu *vcpu);
281 	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
282 	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
283 	int (*prepare_memory_region)(struct kvm *kvm,
284 				     struct kvm_memory_slot *memslot,
285 				     const struct kvm_userspace_memory_region *mem);
286 	void (*commit_memory_region)(struct kvm *kvm,
287 				     const struct kvm_userspace_memory_region *mem,
288 				     const struct kvm_memory_slot *old,
289 				     const struct kvm_memory_slot *new);
290 	int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
291 	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
292 			   unsigned long end);
293 	int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
294 	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
295 	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
296 	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
297 	void (*free_memslot)(struct kvm_memory_slot *free,
298 			     struct kvm_memory_slot *dont);
299 	int (*create_memslot)(struct kvm_memory_slot *slot,
300 			      unsigned long npages);
301 	int (*init_vm)(struct kvm *kvm);
302 	void (*destroy_vm)(struct kvm *kvm);
303 	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
304 	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
305 			  unsigned int inst, int *advance);
306 	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
307 	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
308 	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
309 	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
310 			      unsigned long arg);
311 	int (*hcall_implemented)(unsigned long hcall);
312 	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
313 				       struct irq_bypass_producer *);
314 	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
315 					struct irq_bypass_producer *);
316 	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
317 	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
318 };
319 
320 extern struct kvmppc_ops *kvmppc_hv_ops;
321 extern struct kvmppc_ops *kvmppc_pr_ops;
322 
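/*
 * Illustrative note: each VM's kvm->arch.kvm_ops points at one of the two
 * ops tables above (HV or PR), and the generic code dispatches through it;
 * a sketch of the pattern:
 *
 *	r = vcpu->kvm->arch.kvm_ops->vcpu_run(run, vcpu);
 */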
323 static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
324 					enum instruction_type type, u32 *inst)
325 {
326 	int ret = EMULATE_DONE;
327 	u32 fetched_inst;
328 
329 	/* Load the instruction manually if the exit path failed
330 	 * to do so */
331 	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
332 		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);
333 
334 	/*  Write fetch_failed unswapped if the fetch failed */
335 	if (ret == EMULATE_DONE)
336 		fetched_inst = kvmppc_need_byteswap(vcpu) ?
337 				swab32(vcpu->arch.last_inst) :
338 				vcpu->arch.last_inst;
339 	else
340 		fetched_inst = vcpu->arch.last_inst;
341 
342 	*inst = fetched_inst;
343 	return ret;
344 }
345 
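/*
 * Typical use (an illustrative sketch, not copied from a caller): emulation
 * paths re-fetch the trapping instruction before decoding it and bail out
 * if the fetch from guest memory did not succeed:
 *
 *	u32 inst;
 *	int emulated;
 *
 *	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
 *	if (emulated != EMULATE_DONE)
 *		return emulated;
 *	... decode and emulate "inst" ...
 */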
346 static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
347 {
348 	return kvm->arch.kvm_ops == kvmppc_hv_ops;
349 }
350 
351 extern int kvmppc_hwrng_present(void);
352 
353 /*
354  * Extracts inst bits, numbered according to the ISA convention in which
355  * the leftmost bit is bit zero. Both the msb and lsb bits are included.
356  */
357 static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
358 {
359 	u32 r;
360 	u32 mask;
361 
362 	BUG_ON(msb > lsb);
363 
364 	mask = (1 << (lsb - msb + 1)) - 1;
365 	r = (inst >> (63 - lsb)) & mask;
366 
367 	return r;
368 }
369 
370 /*
371  * Replaces inst bits msb..lsb, using the same leftmost-bit-is-zero ordering.
372  */
373 static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
374 {
375 	u32 r;
376 	u32 mask;
377 
378 	BUG_ON(msb > lsb);
379 
380 	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
381 	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);
382 
383 	return r;
384 }
385 
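/*
 * Worked example (illustrative): for a 32-bit instruction held in the low
 * word of a u64, instruction bits 0-5 (the primary opcode) correspond to
 * bits 32-37 of the 64-bit value, so
 *
 *	op = kvmppc_get_field(inst, 32, 37);
 *
 * reads the opcode, while
 *
 *	inst = kvmppc_set_field(inst, 38, 42, rt);
 *
 * rewrites instruction bits 6-10 (the RT field) with "rt".
 */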
386 #define one_reg_size(id)	\
387 	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
388 
389 #define get_reg_val(id, reg)	({		\
390 	union kvmppc_one_reg __u;		\
391 	switch (one_reg_size(id)) {		\
392 	case 4: __u.wval = (reg); break;	\
393 	case 8: __u.dval = (reg); break;	\
394 	default: BUG();				\
395 	}					\
396 	__u;					\
397 })
398 
399 
400 #define set_reg_val(id, val)	({		\
401 	u64 __v;				\
402 	switch (one_reg_size(id)) {		\
403 	case 4: __v = (val).wval; break;	\
404 	case 8: __v = (val).dval; break;	\
405 	default: BUG();				\
406 	}					\
407 	__v;					\
408 })
409 
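/*
 * Illustrative use of the ONE_REG helpers above (the register id encodes the
 * access size, so the right union member is picked automatically). A
 * get_one_reg handler might contain:
 *
 *	case KVM_REG_PPC_DAR:
 *		*val = get_reg_val(id, kvmppc_get_dar(vcpu));
 *		break;
 *
 * and the matching set_one_reg handler:
 *
 *	case KVM_REG_PPC_DAR:
 *		kvmppc_set_dar(vcpu, set_reg_val(id, *val));
 *		break;
 *
 * kvmppc_get_dar()/kvmppc_set_dar() are the shared-struct accessors
 * generated further down in this file.
 */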
410 int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
411 int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
412 
413 int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
414 int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
415 
416 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
417 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
418 int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
419 int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
420 
421 void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
422 
423 struct openpic;
424 
425 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
426 extern void kvm_cma_reserve(void) __init;
427 static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
428 {
429 	paca[cpu].kvm_hstate.xics_phys = (void __iomem *)addr;
430 }
431 
432 static inline void kvmppc_set_xive_tima(int cpu,
433 					unsigned long phys_addr,
434 					void __iomem *virt_addr)
435 {
436 	paca[cpu].kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
437 	paca[cpu].kvm_hstate.xive_tima_virt = virt_addr;
438 }
439 
440 static inline u32 kvmppc_get_xics_latch(void)
441 {
442 	u32 xirr;
443 
444 	xirr = get_paca()->kvm_hstate.saved_xirr;
445 	get_paca()->kvm_hstate.saved_xirr = 0;
446 	return xirr;
447 }
448 
449 static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
450 {
451 	paca[cpu].kvm_hstate.host_ipi = host_ipi;
452 }
453 
454 static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
455 {
456 	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
457 }
458 
459 extern void kvm_hv_vm_activated(void);
460 extern void kvm_hv_vm_deactivated(void);
461 extern bool kvm_hv_mode_active(void);
462 
463 #else
464 static inline void __init kvm_cma_reserve(void)
465 {}
466 
467 static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
468 {}
469 
470 static inline void kvmppc_set_xive_tima(int cpu,
471 					unsigned long phys_addr,
472 					void __iomem *virt_addr)
473 {}
474 
475 static inline u32 kvmppc_get_xics_latch(void)
476 {
477 	return 0;
478 }
479 
480 static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
481 {}
482 
483 static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
484 {
485 	kvm_vcpu_kick(vcpu);
486 }
487 
488 static inline bool kvm_hv_mode_active(void)		{ return false; }
489 
490 #endif
491 
492 #ifdef CONFIG_KVM_XICS
493 static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
494 {
495 	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
496 }
497 
498 static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
499 				struct kvm *kvm)
500 {
501 	if (kvm && kvm_irq_bypass)
502 		return kvm->arch.pimap;
503 	return NULL;
504 }
505 
506 extern void kvmppc_alloc_host_rm_ops(void);
507 extern void kvmppc_free_host_rm_ops(void);
508 extern void kvmppc_free_pimap(struct kvm *kvm);
509 extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
510 extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
511 extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
512 extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
513 extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
514 extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
515 			struct kvm_vcpu *vcpu, u32 cpu);
516 extern void kvmppc_xics_ipi_action(void);
517 extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
518 				   unsigned long host_irq);
519 extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
520 				   unsigned long host_irq);
521 extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
522 					struct kvmppc_irq_map *irq_map,
523 					struct kvmppc_passthru_irqmap *pimap,
524 					bool *again);
525 
526 extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
527 			       int level, bool line_status);
528 
529 extern int h_ipi_redirect;
530 #else
531 static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
532 				struct kvm *kvm)
533 	{ return NULL; }
534 static inline void kvmppc_alloc_host_rm_ops(void) {};
535 static inline void kvmppc_free_host_rm_ops(void) {};
536 static inline void kvmppc_free_pimap(struct kvm *kvm) {};
537 static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
538 	{ return 0; }
539 static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
540 	{ return 0; }
541 static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
542 static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
543 	{ return 0; }
544 #endif
545 
546 #ifdef CONFIG_KVM_XIVE
547 /*
548  * Below, the first "xive" is the "eXternal Interrupt Virtualization Engine",
549  * i.e. the new POWER9 interrupt controller, while the second "xive" is the
550  * legacy "eXternal Interrupt Vector Entry", which is the configuration of an
551  * interrupt on the "xics" interrupt controller of POWER8 and earlier. These
552  * two functions consume or produce a legacy "XIVE" state from the
553  * new "XIVE" interrupt controller.
554  */
555 extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
556 				u32 priority);
557 extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
558 				u32 *priority);
559 extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
560 extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
561 extern void kvmppc_xive_init_module(void);
562 extern void kvmppc_xive_exit_module(void);
563 
564 extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
565 				    struct kvm_vcpu *vcpu, u32 cpu);
566 extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
567 extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
568 				  struct irq_desc *host_desc);
569 extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
570 				  struct irq_desc *host_desc);
571 extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
572 extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
573 
574 extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
575 			       int level, bool line_status);
576 #else
577 static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
578 				       u32 priority) { return -1; }
579 static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
580 				       u32 *priority) { return -1; }
581 static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
582 static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
583 static inline void kvmppc_xive_init_module(void) { }
584 static inline void kvmppc_xive_exit_module(void) { }
585 
586 static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
587 					   struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
588 static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
589 static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
590 					 struct irq_desc *host_desc) { return -ENODEV; }
591 static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
592 					 struct irq_desc *host_desc) { return -ENODEV; }
593 static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
594 static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; }
595 
596 static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
597 				      int level, bool line_status) { return -ENODEV; }
598 #endif /* CONFIG_KVM_XIVE */
599 
600 /*
601  * Prototypes for functions called only from assembler code.
602  * Having prototypes reduces sparse errors.
603  */
604 long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
605 			 unsigned long ioba, unsigned long tce);
606 long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
607 				  unsigned long liobn, unsigned long ioba,
608 				  unsigned long tce_list, unsigned long npages);
609 long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
610 			   unsigned long liobn, unsigned long ioba,
611 			   unsigned long tce_value, unsigned long npages);
612 long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
613                             unsigned int yield_count);
614 long kvmppc_h_random(struct kvm_vcpu *vcpu);
615 void kvmhv_commence_exit(int trap);
616 long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
617 void kvmppc_subcore_enter_guest(void);
618 void kvmppc_subcore_exit_guest(void);
619 long kvmppc_realmode_hmi_handler(void);
620 long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
621                     long pte_index, unsigned long pteh, unsigned long ptel);
622 long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
623                      unsigned long pte_index, unsigned long avpn);
624 long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
625 long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
626                       unsigned long pte_index, unsigned long avpn,
627                       unsigned long va);
628 long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
629                    unsigned long pte_index);
630 long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
631                         unsigned long pte_index);
632 long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
633                         unsigned long pte_index);
634 long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
635                           unsigned long slb_v, unsigned int status, bool data);
636 unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
637 unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
638 unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
639 int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
640                     unsigned long mfrr);
641 int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
642 int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
643 
644 /*
645  * Host-side operations that we want available while running in real
646  * mode in the guest and operating on the XICS.
647  * Currently only VCPU wakeup is supported.
648  */
649 
650 union kvmppc_rm_state {
651 	unsigned long raw;
652 	struct {
653 		u32 in_host;
654 		u32 rm_action;
655 	};
656 };
657 
658 struct kvmppc_host_rm_core {
659 	union kvmppc_rm_state rm_state;
660 	void *rm_data;
661 	char pad[112];
662 };
663 
664 struct kvmppc_host_rm_ops {
665 	struct kvmppc_host_rm_core	*rm_core;
666 	void		(*vcpu_kick)(struct kvm_vcpu *vcpu);
667 };
668 
669 extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
670 
671 static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
672 {
673 #ifdef CONFIG_KVM_BOOKE_HV
674 	return mfspr(SPRN_GEPR);
675 #elif defined(CONFIG_BOOKE)
676 	return vcpu->arch.epr;
677 #else
678 	return 0;
679 #endif
680 }
681 
682 static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
683 {
684 #ifdef CONFIG_KVM_BOOKE_HV
685 	mtspr(SPRN_GEPR, epr);
686 #elif defined(CONFIG_BOOKE)
687 	vcpu->arch.epr = epr;
688 #endif
689 }
690 
691 #ifdef CONFIG_KVM_MPIC
692 
693 void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
694 int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
695 			     u32 cpu);
696 void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);
697 
698 #else
699 
700 static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
701 {
702 }
703 
704 static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
705 		struct kvm_vcpu *vcpu, u32 cpu)
706 {
707 	return -EINVAL;
708 }
709 
710 static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
711 		struct kvm_vcpu *vcpu)
712 {
713 }
714 
715 #endif /* CONFIG_KVM_MPIC */
716 
717 int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
718 			      struct kvm_config_tlb *cfg);
719 int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
720 			     struct kvm_dirty_tlb *cfg);
721 
722 long kvmppc_alloc_lpid(void);
723 void kvmppc_claim_lpid(long lpid);
724 void kvmppc_free_lpid(long lpid);
725 void kvmppc_init_lpid(unsigned long nr_lpids);
726 
727 static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
728 {
729 	struct page *page;
730 	/*
731 	 * We can only access pages that the kernel maps
732 	 * as memory. Bail out for unmapped ones.
733 	 */
734 	if (!pfn_valid(pfn))
735 		return;
736 
737 	/* Clear i-cache for new pages */
738 	page = pfn_to_page(pfn);
739 	if (!test_bit(PG_arch_1, &page->flags)) {
740 		flush_dcache_icache_page(page);
741 		set_bit(PG_arch_1, &page->flags);
742 	}
743 }
744 
745 /*
746  * Shared struct helpers. The shared struct can be little or big endian,
747  * depending on the guest endianness, so expose accessors that handle both.
748  */
749 static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
750 {
751 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
752 	/* Only Book3S_64 PR supports bi-endian for now */
753 	return vcpu->arch.shared_big_endian;
754 #elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
755 	/* Book3s_64 HV on little endian is always little endian */
756 	return false;
757 #else
758 	return true;
759 #endif
760 }
761 
762 #define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
763 static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
764 {									\
765 	return mfspr(bookehv_spr);					\
766 }									\
767 
768 #define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
769 static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
770 {									\
771 	mtspr(bookehv_spr, val);						\
772 }									\
773 
774 #define SHARED_WRAPPER_GET(reg, size)					\
775 static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
776 {									\
777 	if (kvmppc_shared_big_endian(vcpu))				\
778 	       return be##size##_to_cpu(vcpu->arch.shared->reg);	\
779 	else								\
780 	       return le##size##_to_cpu(vcpu->arch.shared->reg);	\
781 }									\
782 
783 #define SHARED_WRAPPER_SET(reg, size)					\
784 static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
785 {									\
786 	if (kvmppc_shared_big_endian(vcpu))				\
787 	       vcpu->arch.shared->reg = cpu_to_be##size(val);		\
788 	else								\
789 	       vcpu->arch.shared->reg = cpu_to_le##size(val);		\
790 }									\
791 
792 #define SHARED_WRAPPER(reg, size)					\
793 	SHARED_WRAPPER_GET(reg, size)					\
794 	SHARED_WRAPPER_SET(reg, size)					\
795 
796 #define SPRNG_WRAPPER(reg, bookehv_spr)					\
797 	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
798 	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
799 
800 #ifdef CONFIG_KVM_BOOKE_HV
801 
802 #define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
803 	SPRNG_WRAPPER(reg, bookehv_spr)					\
804 
805 #else
806 
807 #define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
808 	SHARED_WRAPPER(reg, size)					\
809 
810 #endif
811 
812 SHARED_WRAPPER(critical, 64)
813 SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
814 SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
815 SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
816 SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
817 SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
818 SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
819 SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
820 SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
821 SHARED_WRAPPER_GET(msr, 64)
822 static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
823 {
824 	if (kvmppc_shared_big_endian(vcpu))
825 	       vcpu->arch.shared->msr = cpu_to_be64(val);
826 	else
827 	       vcpu->arch.shared->msr = cpu_to_le64(val);
828 }
829 SHARED_WRAPPER(dsisr, 32)
830 SHARED_WRAPPER(int_pending, 32)
831 SHARED_WRAPPER(sprg4, 64)
832 SHARED_WRAPPER(sprg5, 64)
833 SHARED_WRAPPER(sprg6, 64)
834 SHARED_WRAPPER(sprg7, 64)
835 
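/*
 * For example (illustrative), on builds without CONFIG_KVM_BOOKE_HV the
 * SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0) invocation above expands to
 * accessors equivalent to:
 *
 *	static inline u64 kvmppc_get_srr0(struct kvm_vcpu *vcpu)
 *	{
 *		if (kvmppc_shared_big_endian(vcpu))
 *			return be64_to_cpu(vcpu->arch.shared->srr0);
 *		else
 *			return le64_to_cpu(vcpu->arch.shared->srr0);
 *	}
 *
 * plus the matching setter, while BookE-HV builds access the guest SPR
 * directly with mfspr(SPRN_GSRR0) and mtspr(SPRN_GSRR0, val).
 */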
836 static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
837 {
838 	if (kvmppc_shared_big_endian(vcpu))
839 	       return be32_to_cpu(vcpu->arch.shared->sr[nr]);
840 	else
841 	       return le32_to_cpu(vcpu->arch.shared->sr[nr]);
842 }
843 
844 static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
845 {
846 	if (kvmppc_shared_big_endian(vcpu))
847 	       vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
848 	else
849 	       vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
850 }
851 
852 /*
853  * Call this after kvmppc_prepare_to_enter(). It puts the lazy EE and
854  * IRQ-disabled tracking state back to normal mode, without actually
855  * enabling interrupts.
856  */
857 static inline void kvmppc_fix_ee_before_entry(void)
858 {
859 	trace_hardirqs_on();
860 
861 #ifdef CONFIG_PPC64
862 	/*
863 	 * To avoid races, the caller must have gone directly from having
864 	 * interrupts fully-enabled to hard-disabled.
865 	 */
866 	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);
867 
868 	/* Only need to enable IRQs by hard enabling them after this */
869 	local_paca->irq_happened = 0;
870 	local_paca->soft_enabled = 1;
871 #endif
872 }
873 
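/*
 * Sketch of the usual sequence on the guest-entry path (illustrative only;
 * details vary between the core-specific run loops):
 *
 *	r = kvmppc_prepare_to_enter(vcpu);
 *	if (r <= 0)
 *		return r;
 *	... interrupts are now hard-disabled ...
 *	kvmppc_fix_ee_before_entry();
 *	... enter the guest ...
 */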
874 static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
875 {
876 	ulong ea;
877 	ulong msr_64bit = 0;
878 
879 	ea = kvmppc_get_gpr(vcpu, rb);
880 	if (ra)
881 		ea += kvmppc_get_gpr(vcpu, ra);
882 
883 #if defined(CONFIG_PPC_BOOK3E_64)
884 	msr_64bit = MSR_CM;
885 #elif defined(CONFIG_PPC_BOOK3S_64)
886 	msr_64bit = MSR_SF;
887 #endif
888 
889 	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
890 		ea = (uint32_t)ea;
891 
892 	return ea;
893 }
894 
895 extern void xics_wake_cpu(int cpu);
896 
897 #endif /* __POWERPC_KVM_PPC_H__ */
898