/* arch/powerpc/include/asm/kvm_ppc.h (revision 151f4e2b) */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#include <asm/xive.h>
#include <asm/cpu_has_feature.h>
#endif

/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used to
 * implement software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00

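/*
 * Illustrative sketch (not part of this header's API): a debug-exception
 * handler can compare the faulting instruction against the breakpoint
 * opcode to decide whether to exit to the debugger in userspace, roughly:
 *
 *	u32 inst;
 *
 *	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) == EMULATE_DONE &&
 *	    inst == KVMPPC_INST_SW_BREAKPOINT)
 *		run->exit_reason = KVM_EXIT_DEBUG;
 */
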
enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
	EMULATE_FAIL,         /* can't emulate this instruction */
	EMULATE_AGAIN,        /* something went wrong; try again */
	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};

enum instruction_fetch_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
			int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
				int rs, unsigned int bytes,
				int is_default_endian);

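/*
 * A rough sketch of how the handlers above are typically used (assumed
 * flow, not a definition): when load/store emulation hits a guest
 * physical address with no memslot backing it, the emulator calls
 * kvmppc_handle_load()/kvmppc_handle_store(), which fill kvm_run with an
 * MMIO request and return EMULATE_DO_MMIO so the vcpu ioctl can exit to
 * userspace; on re-entry the MMIO result is written back to register rt.
 */
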
extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_fetch_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
				      struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
			   unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
			      gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);

extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
						unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
				      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags,
					   ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp);
extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
extern void kvmppc_setup_partition_table(struct kvm *kvm);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
		struct kvm *kvm, unsigned long liobn);
#define kvmppc_ioba_validate(stt, ioba, npages)                         \
		(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
				(stt)->size, (ioba), (npages)) ?        \
				H_PARAMETER : H_SUCCESS)
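
/*
 * Hypothetical usage sketch for the macro above: an H_PUT_TCE handler
 * would validate the I/O bus address before touching the table, e.g.
 *
 *	stt = kvmppc_find_table(vcpu->kvm, liobn);
 *	if (!stt)
 *		return H_TOO_HARD;
 *	if (kvmppc_ioba_validate(stt, ioba, 1) != H_SUCCESS)
 *		return H_PARAMETER;
 */
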
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *free,
				     struct kvm_memory_slot *dont);
extern int kvmppc_core_create_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *slot,
				      unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
					    struct kvm_ppc_resize_hpt *rhpt);
extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
					   struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);

extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	u32	vsx32val[4];
	u16	vsx16val[8];
	u8	vsx8val[16];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
	u64	xive_timaval[2];
};

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *memslot,
				     const struct kvm_userspace_memory_region *mem);
	void (*commit_memory_region)(struct kvm *kvm,
				     const struct kvm_userspace_memory_region *mem,
				     const struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new,
				     enum kvm_mr_change change);
	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
			   unsigned long end);
	int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
	void (*free_memslot)(struct kvm_memory_slot *free,
			     struct kvm_memory_slot *dont);
	int (*create_memslot)(struct kvm_memory_slot *slot,
			      unsigned long npages);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
				       struct irq_bypass_producer *);
	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
					struct irq_bypass_producer *);
	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
	int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
			    unsigned long flags);
	void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
	int (*enable_nested)(struct kvm *kvm);
	int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			       int size);
	int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			      int size);
};
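
/*
 * Sketch of the dispatch pattern (an assumption about how callers use
 * this table, based on is_kvmppc_hv_enabled() below): most of the
 * kvmppc_core_*() wrappers simply indirect through the ops attached to
 * the VM, e.g.
 *
 *	return kvm->arch.kvm_ops->init_vm(kvm);
 */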

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
				enum instruction_fetch_type type, u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
				swab32(vcpu->arch.last_inst) :
				vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

/*
 * Extracts instruction bits numbered according to the Power ISA
 * convention, i.e. MSB 0: the leftmost bit of the 64-bit value is bit 0.
 * Both the msb and lsb bits are included.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}

/*
 * Replaces instruction bits, using the same MSB-0 bit ordering.
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}
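
/*
 * Worked example (illustrative only): with MSB-0 numbering, bits 58-63
 * are the lowest six bits of the value, so
 *
 *	kvmppc_get_field(0x2a, 58, 63)		== 0x2a
 *	kvmppc_set_field(0, 58, 63, 0x15)	== 0x15
 *
 * A 32-bit instruction held in the low word of the u64 has its ISA bit n
 * at MSB-0 position n + 32 here.
 */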

#define one_reg_size(id)	\
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})


#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})

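/*
 * Sketch of typical usage (assumed, mirroring how ONE_REG handlers are
 * usually written): reading packs a register into the union sized by
 * the id, writing unpacks it, e.g. in get_one_reg()/set_one_reg():
 *
 *	case KVM_REG_PPC_DAR:
 *		*val = get_reg_val(id, kvmppc_get_dar(vcpu));
 *		break;
 *	...
 *	case KVM_REG_PPC_DAR:
 *		kvmppc_set_dar(vcpu, set_reg_val(id, *val));
 *		break;
 */
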
int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{
	paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
	paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{
	paca_ptrs[cpu]->kvm_hstate.host_ipi = host_ipi;
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
					struct kvm_nested_guest *nested);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)		{ return false; }

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
{
	if (kvm && kvm_irq_bypass)
		return kvm->arch.pimap;
	return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
			struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
					struct kvmppc_irq_map *irq_map,
					struct kvmppc_passthru_irqmap *pimap,
					bool *again);

extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);

extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
	{ return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) { }
static inline void kvmppc_free_host_rm_ops(void) { }
static inline void kvmppc_free_pimap(struct kvm *kvm) { }
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
	{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
#endif

#ifdef CONFIG_KVM_XIVE
/*
 * Below, the first "xive" is the "eXternal Interrupt Virtualization Engine",
 * i.e. the new POWER9 interrupt controller, while the second "xive" is the
 * legacy "eXternal Interrupt Vector Entry", which is the configuration of
 * an interrupt on the "xics" interrupt controller on POWER8 and earlier.
 * These two functions consume or produce a legacy "XIVE" state from the
 * new "XIVE" interrupt controller.
 */
extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
extern void kvmppc_xive_init_module(void);
extern void kvmppc_xive_exit_module(void);

extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				  struct irq_desc *host_desc);
extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				  struct irq_desc *host_desc);
extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);

extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XIVE;
}

extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_xive_native_init_module(void);
extern void kvmppc_xive_native_exit_module(void);
extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);

#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				       u32 priority) { return -1; }
static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				       u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
static inline void kvmppc_xive_init_module(void) { }
static inline void kvmppc_xive_exit_module(void) { }

static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
					 struct irq_desc *host_desc) { return -ENODEV; }
static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
					 struct irq_desc *host_desc) { return -ENODEV; }
static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; }

static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
				      int level, bool line_status) { return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
			  struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline void kvmppc_xive_native_init_module(void) { }
static inline void kvmppc_xive_native_exit_module(void) { }
static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
{ return 0; }
static inline int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
{ return -ENOENT; }

#endif /* CONFIG_KVM_XIVE */

#if defined(CONFIG_PPC_POWERNV) && defined(CONFIG_KVM_BOOK3S_64_HANDLER)
static inline bool xics_on_xive(void)
{
	return xive_enabled() && cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool xics_on_xive(void)
{
	return false;
}
#endif

/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			 unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				  unsigned long liobn, unsigned long ioba,
				  unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
			   unsigned long liobn, unsigned long ioba,
			   unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count);
long kvmppc_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
			   unsigned long dest, unsigned long src);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);

/*
 * Host-side operations that we may need while running in real mode in
 * the guest, operating on the XICS. Currently only VCPU wakeup is
 * supported.
 */

union kvmppc_rm_state {
	unsigned long raw;
	struct {
		u32 in_host;
		u32 rm_action;
	};
};

struct kvmppc_host_rm_core {
	union kvmppc_rm_state rm_state;
	void *rm_data;
	char pad[112];
};

struct kvmppc_host_rm_ops {
	struct kvmppc_host_rm_core	*rm_core;
	void		(*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
		struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
		struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct page *page;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	page = pfn_to_page(pfn);
	if (!test_bit(PG_arch_1, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_arch_1, &page->flags);
	}
}

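/*
 * Usage note (assumed, matching the PG_arch_1 convention used elsewhere
 * in arch/powerpc): callers invoke this after mapping a new page into
 * the guest, so stale instruction-cache lines are flushed exactly once
 * per page; the test_bit() check makes repeat calls cheap.
 */
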
/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness, so expose helpers that handle both
 * layouts.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);					\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
	       return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
	       return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
	       vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
	       vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

#define SPRNG_WRAPPER(reg, bookehv_spr)					\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SPRNG_WRAPPER(reg, bookehv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif

SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
	       vcpu->arch.shared->msr = cpu_to_be64(val);
	else
	       vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)
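
/*
 * For reference, the expansion implied by the macros above:
 * SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0) generates
 *
 *	kvmppc_get_srr0(vcpu) / kvmppc_set_srr0(vcpu, val)
 *
 * which on BOOKE_HV are mfspr/mtspr of SPRN_GSRR0, and otherwise are
 * endianness-aware accesses to vcpu->arch.shared->srr0.
 */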

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
	       return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
	       return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
	       vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
	       vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Call this after kvmppc_prepare_to_enter(). It puts the lazy-EE and
 * IRQ-disabled tracking state back to normal mode, without actually
 * enabling interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	irq_soft_mask_set(IRQS_ENABLED);
#endif
}

static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}
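
/*
 * Illustrative use (an assumption about typical callers, using the
 * get_ra()/get_rb() helpers from asm/disassemble.h): when emulating an
 * X-form access such as lwzx RT,RA,RB, the effective address is
 *
 *	ea = kvmppc_get_ea_indexed(vcpu, get_ra(inst), get_rb(inst));
 *
 * where RA == 0 means "no base register" per the ISA, and the result is
 * truncated to 32 bits when the guest is in 32-bit mode.
 */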

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */