/* xref: /openbmc/linux/arch/powerpc/kvm/powerpc.c (revision 8730046c) */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);

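/*
 * A vcpu is runnable if it has an exception pending for delivery or
 * if another thread has posted a request that still needs servicing.
 */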
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests preemptibly */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/*
			 * Interrupts got enabled in between, so we
			 * are back at square 1.
			 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
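/*
 * Byte-swap every field of the shared (magic) page in place.  Called
 * when the guest changes the endianness it uses to access the page.
 */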
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

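/*
 * Handle a KVM paravirtual hypercall.  The hypercall number arrives in
 * r11 and up to four parameters in r3-r6, truncated to 32 bits when the
 * guest runs with MSR_SF clear.  A second return value is passed back
 * to the guest in r4; the primary status is this function's return value.
 */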
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

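/*
 * Run the load/store emulator and translate its result into a resume
 * code: stay in the guest on success, or bounce out to the host to
 * complete an MMIO access or report an emulation failure.
 */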
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

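/*
 * Store 'size' bytes from 'ptr' at guest effective address *eaddr,
 * translating the address first.  Accesses that hit the magic page are
 * redirected to the shared page backing it; anything the kernel cannot
 * write directly is punted back as MMIO.
 */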
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	vcpu->stat.st++;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

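/*
 * Load 'size' bytes into 'ptr' from guest effective address *eaddr,
 * the counterpart of kvmppc_st() above: translate, check permissions
 * (including execute permission for instruction fetches), honour the
 * magic page override, and fall back to MMIO emulation otherwise.
 */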
int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	vcpu->stat.ld++;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MMIO
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
#endif
	case KVM_CAP_PPC_HTM:
		r = cpu_has_feature(CPU_FTR_TM_COMP) &&
		    is_kvmppc_hv_enabled(kvm);
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

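/*
 * hrtimer callback emulating the guest decrementer: run the core
 * decrementer handler for the owning vcpu.  The timer is one-shot
 * (HRTIMER_NORESTART) and is re-armed explicitly when needed.
 */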
static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

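/*
 * Finish a pending MMIO load: pull the value out of run->mmio.data,
 * byte-swapping and sign-extending as recorded when the access was
 * started, and write it to the GPR, FPR and/or QPR that the emulated
 * instruction targeted.
 */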
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

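/*
 * Common MMIO load path: record the access details in vcpu->run so the
 * access can be satisfied.  If an in-kernel device on KVM_MMIO_BUS
 * handles the read we complete the load immediately; otherwise we
 * return EMULATE_DO_MMIO to exit to userspace.
 */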
static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}

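/*
 * MMIO store counterpart of the load path above: place the value in
 * run->mmio.data with the right byte order, then try the in-kernel
 * MMIO bus before falling back to a userspace exit.
 */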
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8  *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8  *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

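/*
 * ONE_REG accessors: the subarch gets first refusal via
 * kvmppc_get_one_reg()/kvmppc_set_one_reg(); registers it does not
 * know about (the Altivec set below) are handled generically here.
 */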
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

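/*
 * Enable an optional, userspace-negotiated capability on one vcpu,
 * e.g. OSI or PAPR hypercall handling, and re-run the sanity check
 * afterwards since the new setting may change which configurations
 * are valid.
 */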
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics)
		return true;
#endif
	return false;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

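/*
 * Simple bitmap allocator for LPIDs (partition IDs), using the
 * lpid_inuse bitmap above.  The bitmap is sized for KVMPPC_NR_LPIDS,
 * but kvmppc_init_lpid() may cap the usable range (nr_lpids) lower.
 */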
long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);