/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#endif

/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used to
 * implement software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00

enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
	EMULATE_FAIL,         /* can't emulate this instruction */
	EMULATE_AGAIN,        /* something went wrong; try again */
	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};
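
/*
 * Illustrative sketch (not part of this header): a run loop typically
 * dispatches on these results along the following lines, where
 * RESUME_GUEST/RESUME_HOST come from asm/kvm_host.h and the zero flags
 * argument to kvmppc_core_queue_program() is only a placeholder:
 *
 *	switch (kvmppc_emulate_instruction(run, vcpu)) {
 *	case EMULATE_DONE:
 *	case EMULATE_AGAIN:
 *		return RESUME_GUEST;
 *	case EMULATE_DO_MMIO:
 *		run->exit_reason = KVM_EXIT_MMIO;
 *		return RESUME_HOST;
 *	case EMULATE_EXIT_USER:
 *		return RESUME_HOST;
 *	case EMULATE_FAIL:
 *	default:
 *		kvmppc_core_queue_program(vcpu, 0);
 *		return RESUME_GUEST;
 *	}
 */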

enum instruction_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned int rt, unsigned int bytes,
				  int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   int rs, unsigned int bytes,
				   int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
				      struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
			   unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
			      gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);
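
/*
 * Illustrative sketch (an assumption, not kernel code): the two xlate
 * enums above select which translation to perform and which permission
 * to check. For example, to ask whether the guest may read a data
 * address and obtain its real address:
 *
 *	struct kvmppc_pte pte;
 *
 *	if (kvmppc_xlate(vcpu, eaddr, XLATE_DATA, XLATE_READ, &pte) == 0)
 *		gpa = pte.raddr;
 *
 * A non-zero return means the translation failed, and the caller would
 * normally inject the appropriate storage interrupt into the guest.
 */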

extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
						unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
				      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags,
					   ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp);
extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
extern void kvmppc_setup_partition_table(struct kvm *kvm);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
		struct kvm *kvm, unsigned long liobn);
#define kvmppc_ioba_validate(stt, ioba, npages)                         \
		(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
				(stt)->size, (ioba), (npages)) ?        \
				H_PARAMETER : H_SUCCESS)
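
/*
 * Typical use (a sketch based on the hcall handlers that consume this
 * macro): validate the I/O bus address range before touching the TCE
 * table, and fail the hcall with the returned status otherwise:
 *
 *	long ret = kvmppc_ioba_validate(stt, ioba, npages);
 *
 *	if (ret != H_SUCCESS)
 *		return ret;
 */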
extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt,
		unsigned long tce);
extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
		unsigned long *ua, unsigned long **prmap);
extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
		unsigned long idx, unsigned long tce);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *free,
				     struct kvm_memory_slot *dont);
extern int kvmppc_core_create_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *slot,
				      unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
					    struct kvm_ppc_resize_hpt *rhpt);
extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
					   struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);

extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	u32	vsx32val[4];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
};

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *memslot,
				     const struct kvm_userspace_memory_region *mem);
	void (*commit_memory_region)(struct kvm *kvm,
				     const struct kvm_userspace_memory_region *mem,
				     const struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new);
	int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
			   unsigned long end);
	int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
	void (*free_memslot)(struct kvm_memory_slot *free,
			     struct kvm_memory_slot *dont);
	int (*create_memslot)(struct kvm_memory_slot *slot,
			      unsigned long npages);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
				       struct irq_bypass_producer *);
	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
					struct irq_bypass_producer *);
	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
	int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
			    unsigned long flags);
};
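
/*
 * Illustrative sketch (an assumption, mirroring how generic code
 * dispatches through this ops table): each VM selects kvmppc_hv_ops or
 * kvmppc_pr_ops at creation time, and per-core wrappers simply indirect
 * through kvm->arch.kvm_ops, e.g.:
 *
 *	static inline void example_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 *	{
 *		vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
 *	}
 *
 * kvmppc_fast_vcpu_kick() below uses exactly this pattern.
 */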

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
					enum instruction_type type, u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
				swab32(vcpu->arch.last_inst) :
				vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

/*
 * Extracts inst bits, numbered as in the Power ISA specification:
 * bit 0 is the leftmost (most significant) bit. Both the msb and lsb
 * endpoint bits are included.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}

/*
 * Replaces inst bits, using the same specification bit ordering.
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}
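
/*
 * Worked example of the IBM bit numbering above (for illustration
 * only): a 32-bit instruction held in the low half of "inst" occupies
 * bits 32-63, so the primary opcode, bits 0-5 of the instruction, is
 * bits 32-37 of inst:
 *
 *	u32 op = kvmppc_get_field(inst, 32, 37);
 *
 * which evaluates to (inst >> 26) & 0x3f. Conversely,
 * kvmppc_set_field(inst, 32, 37, op) writes the same field back.
 */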

#define one_reg_size(id)	\
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})

#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})
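
/*
 * Illustrative sketch (an assumption, not kernel code): the register
 * size is encoded in the ONE_REG id itself, so for a 64-bit register
 * such as KVM_REG_PPC_DAR, one_reg_size(id) is 8 and the helpers pick
 * the dval member:
 *
 *	union kvmppc_one_reg u =
 *		get_reg_val(KVM_REG_PPC_DAR, kvmppc_get_dar(vcpu));
 *	u64 v = set_reg_val(KVM_REG_PPC_DAR, u);
 *
 * get_reg_val() wraps a host value for copying out to userspace;
 * set_reg_val() unwraps a value received from userspace.
 */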

int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca[cpu].kvm_hstate.xics_phys = (void __iomem *)addr;
}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{
	paca[cpu].kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
	paca[cpu].kvm_hstate.xive_tima_virt = virt_addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{
	paca[cpu].kvm_hstate.host_ipi = host_ipi;
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)		{ return false; }

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
{
	if (kvm && kvm_irq_bypass)
		return kvm->arch.pimap;
	return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
			struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
					struct kvmppc_irq_map *irq_map,
					struct kvmppc_passthru_irqmap *pimap,
					bool *again);

extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);

extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
	{ return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {}
static inline void kvmppc_free_host_rm_ops(void) {}
static inline void kvmppc_free_pimap(struct kvm *kvm) {}
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
	{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
#endif

#ifdef CONFIG_KVM_XIVE
/*
 * Below, the first "xive" is the "eXternal Interrupt Virtualization
 * Engine", i.e. the new POWER9 interrupt controller, while the second
 * "xive" is the legacy "eXternal Interrupt Vector Entry", which is the
 * configuration of an interrupt on the "xics" interrupt controller of
 * POWER8 and earlier. These two functions consume or produce a legacy
 * "XIVE" state from the new "XIVE" interrupt controller.
 */
extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
extern void kvmppc_xive_init_module(void);
extern void kvmppc_xive_exit_module(void);

extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				  struct irq_desc *host_desc);
extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				  struct irq_desc *host_desc);
extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);

extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);
#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				       u32 priority) { return -1; }
static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				       u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
static inline void kvmppc_xive_init_module(void) { }
static inline void kvmppc_xive_exit_module(void) { }

static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
					 struct irq_desc *host_desc) { return -ENODEV; }
static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
					 struct irq_desc *host_desc) { return -ENODEV; }
static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; }

static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
				      int level, bool line_status) { return -ENODEV; }
#endif /* CONFIG_KVM_XIVE */

/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			 unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				  unsigned long liobn, unsigned long ioba,
				  unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
			   unsigned long liobn, unsigned long ioba,
			   unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count);
long kvmppc_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);

/*
 * Host-side operations that can be requested while running in real
 * mode in the guest, operating on the XICS.
 * Currently only VCPU wakeup is supported.
 */

union kvmppc_rm_state {
	unsigned long raw;
	struct {
		u32 in_host;
		u32 rm_action;
	};
};

struct kvmppc_host_rm_core {
	union kvmppc_rm_state rm_state;
	void *rm_data;
	char pad[112];
};

struct kvmppc_host_rm_ops {
	struct kvmppc_host_rm_core	*rm_core;
	void		(*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
		struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
		struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct page *page;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	page = pfn_to_page(pfn);
	if (!test_bit(PG_arch_1, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_arch_1, &page->flags);
	}
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness, so provide endian-aware accessors
 * for all of its fields.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);					\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
	       return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
	       return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
	       vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
	       vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

#define SPRNG_WRAPPER(reg, bookehv_spr)					\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SPRNG_WRAPPER(reg, bookehv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif

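/*
 * Illustrative expansion of the wrappers above (for clarity, not extra
 * API): SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0) generates
 * kvmppc_get_sprg0() and kvmppc_set_sprg0(). Without
 * CONFIG_KVM_BOOKE_HV they access the endian-correct shared page field,
 * roughly:
 *
 *	static inline u64 kvmppc_get_sprg0(struct kvm_vcpu *vcpu)
 *	{
 *		if (kvmppc_shared_big_endian(vcpu))
 *			return be64_to_cpu(vcpu->arch.shared->sprg0);
 *		else
 *			return le64_to_cpu(vcpu->arch.shared->sprg0);
 *	}
 *
 * With CONFIG_KVM_BOOKE_HV they instead read or write the guest SPR
 * directly via mfspr(SPRN_GSPRG0)/mtspr(SPRN_GSPRG0, val).
 */
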
SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
	       vcpu->arch.shared->msr = cpu_to_be64(val);
	else
	       vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
	       return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
	       return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
	       vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
	       vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Call this after prepare_to_enter. It returns the lazy-EE and
 * IRQ-disabled tracking state to normal mode, without actually
 * enabling interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	local_paca->soft_enabled = 1;
#endif
}

static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}
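
/*
 * Illustrative sketch (an assumption, not kernel code): for an X-form
 * access such as "lwzx rt, ra, rb", the effective address is
 * (ra ? GPR[ra] : 0) + GPR[rb], truncated to 32 bits when the guest
 * MSR indicates 32-bit mode:
 *
 *	ulong ea = kvmppc_get_ea_indexed(vcpu, get_ra(inst), get_rb(inst));
 *
 * where get_ra()/get_rb() are the instruction field accessors from
 * asm/disassemble.h.
 */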

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */