xref: /openbmc/linux/arch/powerpc/kvm/powerpc.c (revision 7e24a55b2122746c2eef192296fc84624354f895)
1d94d71cbSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2bbf45ba5SHollis Blanchard /*
3bbf45ba5SHollis Blanchard  *
4bbf45ba5SHollis Blanchard  * Copyright IBM Corp. 2007
5bbf45ba5SHollis Blanchard  *
6bbf45ba5SHollis Blanchard  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
7bbf45ba5SHollis Blanchard  *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
8bbf45ba5SHollis Blanchard  */
9bbf45ba5SHollis Blanchard 
10bbf45ba5SHollis Blanchard #include <linux/errno.h>
11bbf45ba5SHollis Blanchard #include <linux/err.h>
12bbf45ba5SHollis Blanchard #include <linux/kvm_host.h>
13bbf45ba5SHollis Blanchard #include <linux/vmalloc.h>
14544c6761SAlexander Graf #include <linux/hrtimer.h>
15174cd4b1SIngo Molnar #include <linux/sched/signal.h>
16bbf45ba5SHollis Blanchard #include <linux/fs.h>
175a0e3ad6STejun Heo #include <linux/slab.h>
18eb1e4f43SScott Wood #include <linux/file.h>
19cbbc58d4SAneesh Kumar K.V #include <linux/module.h>
209576730dSSuresh Warrier #include <linux/irqbypass.h>
219576730dSSuresh Warrier #include <linux/kvm_irqfd.h>
22e6f6390aSChristophe Leroy #include <linux/of.h>
23bbf45ba5SHollis Blanchard #include <asm/cputable.h>
247c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
25bbf45ba5SHollis Blanchard #include <asm/kvm_ppc.h>
26371fefd6SPaul Mackerras #include <asm/cputhreads.h>
27bd2be683SAlexander Graf #include <asm/irqflags.h>
2858ded420SAlexey Kardashevskiy #include <asm/iommu.h>
296f63e81bSBin Lu #include <asm/switch_to.h>
305af50993SBenjamin Herrenschmidt #include <asm/xive.h>
313214d01fSPaul Mackerras #ifdef CONFIG_PPC_PSERIES
323214d01fSPaul Mackerras #include <asm/hvcall.h>
333214d01fSPaul Mackerras #include <asm/plpar_wrappers.h>
343214d01fSPaul Mackerras #endif
3522945688SBharata B Rao #include <asm/ultravisor.h>
36113fe88eSChristophe Leroy #include <asm/setup.h>
375af50993SBenjamin Herrenschmidt 
3873e75b41SHollis Blanchard #include "timing.h"
39fad7b9b5SPaul Mackerras #include "../mm/mmu_decl.h"
40bbf45ba5SHollis Blanchard 
4146f43c6eSMarcelo Tosatti #define CREATE_TRACE_POINTS
4246f43c6eSMarcelo Tosatti #include "trace.h"
4346f43c6eSMarcelo Tosatti 
44cbbc58d4SAneesh Kumar K.V struct kvmppc_ops *kvmppc_hv_ops;
45cbbc58d4SAneesh Kumar K.V EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
46cbbc58d4SAneesh Kumar K.V struct kvmppc_ops *kvmppc_pr_ops;
47cbbc58d4SAneesh Kumar K.V EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
48cbbc58d4SAneesh Kumar K.V 
493a167beaSAneesh Kumar K.V 
50bbf45ba5SHollis Blanchard int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
51bbf45ba5SHollis Blanchard {
522fa6e1e1SRadim Krčmář 	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
53bbf45ba5SHollis Blanchard }
54bbf45ba5SHollis Blanchard 
5517e433b5SWanpeng Li bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
5617e433b5SWanpeng Li {
5717e433b5SWanpeng Li 	return kvm_arch_vcpu_runnable(vcpu);
5817e433b5SWanpeng Li }
5917e433b5SWanpeng Li 
60199b5763SLongpeng(Mike) bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
61199b5763SLongpeng(Mike) {
62199b5763SLongpeng(Mike) 	return false;
63199b5763SLongpeng(Mike) }
64199b5763SLongpeng(Mike) 
65b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
66b6d33834SChristoffer Dall {
67b6d33834SChristoffer Dall 	return 1;
68b6d33834SChristoffer Dall }
69b6d33834SChristoffer Dall 
7003d25c5bSAlexander Graf /*
7103d25c5bSAlexander Graf  * Common checks before entering the guest world.  Call with interrupts
7203d25c5bSAlexander Graf  * enabled; they are hard-disabled here and stay disabled when 1 is returned.
7303d25c5bSAlexander Graf  *
747ee78855SAlexander Graf  * returns:
757ee78855SAlexander Graf  *
767ee78855SAlexander Graf  * == 1 if we're ready to go into guest state
777ee78855SAlexander Graf  * <= 0 if we need to go back to the host with return value
7803d25c5bSAlexander Graf  */
7903d25c5bSAlexander Graf int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
8003d25c5bSAlexander Graf {
816c85f52bSScott Wood 	int r;
8203d25c5bSAlexander Graf 
836c85f52bSScott Wood 	WARN_ON(irqs_disabled());
846c85f52bSScott Wood 	hard_irq_disable();
856c85f52bSScott Wood 
8603d25c5bSAlexander Graf 	while (true) {
8703d25c5bSAlexander Graf 		if (need_resched()) {
8803d25c5bSAlexander Graf 			local_irq_enable();
8903d25c5bSAlexander Graf 			cond_resched();
906c85f52bSScott Wood 			hard_irq_disable();
9103d25c5bSAlexander Graf 			continue;
9203d25c5bSAlexander Graf 		}
9303d25c5bSAlexander Graf 
9403d25c5bSAlexander Graf 		if (signal_pending(current)) {
957ee78855SAlexander Graf 			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
967ee78855SAlexander Graf 			vcpu->run->exit_reason = KVM_EXIT_INTR;
977ee78855SAlexander Graf 			r = -EINTR;
9803d25c5bSAlexander Graf 			break;
9903d25c5bSAlexander Graf 		}
10003d25c5bSAlexander Graf 
1015bd1cf11SScott Wood 		vcpu->mode = IN_GUEST_MODE;
1025bd1cf11SScott Wood 
1035bd1cf11SScott Wood 		/*
1045bd1cf11SScott Wood 		 * Reading vcpu->requests must happen after setting vcpu->mode,
1055bd1cf11SScott Wood 		 * so we don't miss a request because the requester sees
1065bd1cf11SScott Wood 		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
1075bd1cf11SScott Wood 		 * before next entering the guest (and thus doesn't IPI).
108489153c7SLan Tianyu 		 * This also orders the write to mode from any reads
109489153c7SLan Tianyu 		 * to the page tables done while the VCPU is running.
110489153c7SLan Tianyu 		 * Please see the comment in kvm_flush_remote_tlbs.
1115bd1cf11SScott Wood 		 */
11203d25c5bSAlexander Graf 		smp_mb();
1135bd1cf11SScott Wood 
1142fa6e1e1SRadim Krčmář 		if (kvm_request_pending(vcpu)) {
11503d25c5bSAlexander Graf 			/* Make sure we process requests preemptibly */
11603d25c5bSAlexander Graf 			local_irq_enable();
11703d25c5bSAlexander Graf 			trace_kvm_check_requests(vcpu);
1187c973a2eSAlexander Graf 			r = kvmppc_core_check_requests(vcpu);
1196c85f52bSScott Wood 			hard_irq_disable();
1207c973a2eSAlexander Graf 			if (r > 0)
12103d25c5bSAlexander Graf 				continue;
1227c973a2eSAlexander Graf 			break;
12303d25c5bSAlexander Graf 		}
12403d25c5bSAlexander Graf 
12503d25c5bSAlexander Graf 		if (kvmppc_core_prepare_to_enter(vcpu)) {
12603d25c5bSAlexander Graf 			/* interrupts got enabled in between, so we
12703d25c5bSAlexander Graf 			   are back at square 1 */
12803d25c5bSAlexander Graf 			continue;
12903d25c5bSAlexander Graf 		}
13003d25c5bSAlexander Graf 
1316edaa530SPaolo Bonzini 		guest_enter_irqoff();
1326c85f52bSScott Wood 		return 1;
13303d25c5bSAlexander Graf 	}
13403d25c5bSAlexander Graf 
1356c85f52bSScott Wood 	/* return to host */
1366c85f52bSScott Wood 	local_irq_enable();
13703d25c5bSAlexander Graf 	return r;
13803d25c5bSAlexander Graf }
1392ba9f0d8SAneesh Kumar K.V EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
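/*
 * A minimal caller sketch (an illustration only, not code from this file;
 * the surrounding loop is hypothetical): the subarch vcpu_run paths are
 * expected to consume the return value above roughly like this.
 *
 *	r = kvmppc_prepare_to_enter(vcpu);
 *	if (r <= 0)
 *		return r;	// back to the host with this return value
 *	// r == 1: interrupts are hard-disabled, enter the guest now
 */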
14003d25c5bSAlexander Graf 
1415deb8e7aSAlexander Graf #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
1425deb8e7aSAlexander Graf static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
1435deb8e7aSAlexander Graf {
1445deb8e7aSAlexander Graf 	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
1455deb8e7aSAlexander Graf 	int i;
1465deb8e7aSAlexander Graf 
1475deb8e7aSAlexander Graf 	shared->sprg0 = swab64(shared->sprg0);
1485deb8e7aSAlexander Graf 	shared->sprg1 = swab64(shared->sprg1);
1495deb8e7aSAlexander Graf 	shared->sprg2 = swab64(shared->sprg2);
1505deb8e7aSAlexander Graf 	shared->sprg3 = swab64(shared->sprg3);
1515deb8e7aSAlexander Graf 	shared->srr0 = swab64(shared->srr0);
1525deb8e7aSAlexander Graf 	shared->srr1 = swab64(shared->srr1);
1535deb8e7aSAlexander Graf 	shared->dar = swab64(shared->dar);
1545deb8e7aSAlexander Graf 	shared->msr = swab64(shared->msr);
1555deb8e7aSAlexander Graf 	shared->dsisr = swab32(shared->dsisr);
1565deb8e7aSAlexander Graf 	shared->int_pending = swab32(shared->int_pending);
1575deb8e7aSAlexander Graf 	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
1585deb8e7aSAlexander Graf 		shared->sr[i] = swab32(shared->sr[i]);
1595deb8e7aSAlexander Graf }
1605deb8e7aSAlexander Graf #endif
1615deb8e7aSAlexander Graf 
1622a342ed5SAlexander Graf int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
1632a342ed5SAlexander Graf {
1642a342ed5SAlexander Graf 	int nr = kvmppc_get_gpr(vcpu, 11);
1652a342ed5SAlexander Graf 	int r;
1662a342ed5SAlexander Graf 	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
1672a342ed5SAlexander Graf 	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
1682a342ed5SAlexander Graf 	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
1692a342ed5SAlexander Graf 	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
1702a342ed5SAlexander Graf 	unsigned long r2 = 0;
1712a342ed5SAlexander Graf 
1725deb8e7aSAlexander Graf 	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
1732a342ed5SAlexander Graf 		/* 32 bit mode */
1742a342ed5SAlexander Graf 		param1 &= 0xffffffff;
1752a342ed5SAlexander Graf 		param2 &= 0xffffffff;
1762a342ed5SAlexander Graf 		param3 &= 0xffffffff;
1772a342ed5SAlexander Graf 		param4 &= 0xffffffff;
1782a342ed5SAlexander Graf 	}
1792a342ed5SAlexander Graf 
1802a342ed5SAlexander Graf 	switch (nr) {
181fdcf8bd7SStuart Yoder 	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
1825fc87407SAlexander Graf 	{
1835deb8e7aSAlexander Graf #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
1845deb8e7aSAlexander Graf 		/* Book3S can be little endian, find it out here */
1855deb8e7aSAlexander Graf 		int shared_big_endian = true;
1865deb8e7aSAlexander Graf 		if (vcpu->arch.intr_msr & MSR_LE)
1875deb8e7aSAlexander Graf 			shared_big_endian = false;
1885deb8e7aSAlexander Graf 		if (shared_big_endian != vcpu->arch.shared_big_endian)
1895deb8e7aSAlexander Graf 			kvmppc_swab_shared(vcpu);
1905deb8e7aSAlexander Graf 		vcpu->arch.shared_big_endian = shared_big_endian;
1915deb8e7aSAlexander Graf #endif
1925deb8e7aSAlexander Graf 
193f3383cf8SAlexander Graf 		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
194f3383cf8SAlexander Graf 			/*
195f3383cf8SAlexander Graf 			 * Older versions of the Linux magic page code had
196f3383cf8SAlexander Graf 			 * a bug where they would map their trampoline code
197f3383cf8SAlexander Graf 			 * NX. If that's the case, remove !PR NX capability.
198f3383cf8SAlexander Graf 			 */
199f3383cf8SAlexander Graf 			vcpu->arch.disable_kernel_nx = true;
200f3383cf8SAlexander Graf 			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
201f3383cf8SAlexander Graf 		}
202f3383cf8SAlexander Graf 
203f3383cf8SAlexander Graf 		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
204f3383cf8SAlexander Graf 		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;
2055fc87407SAlexander Graf 
20689b68c96SAlexander Graf #ifdef CONFIG_PPC_64K_PAGES
20789b68c96SAlexander Graf 		/*
20889b68c96SAlexander Graf 		 * Make sure our 4k magic page is in the same window of a 64k
20989b68c96SAlexander Graf 		 * page within the guest and within the host's page.
21089b68c96SAlexander Graf 		 */
21189b68c96SAlexander Graf 		if ((vcpu->arch.magic_page_pa & 0xf000) !=
21289b68c96SAlexander Graf 		    ((ulong)vcpu->arch.shared & 0xf000)) {
21389b68c96SAlexander Graf 			void *old_shared = vcpu->arch.shared;
21489b68c96SAlexander Graf 			ulong shared = (ulong)vcpu->arch.shared;
21589b68c96SAlexander Graf 			void *new_shared;
21689b68c96SAlexander Graf 
21789b68c96SAlexander Graf 			shared &= PAGE_MASK;
21889b68c96SAlexander Graf 			shared |= vcpu->arch.magic_page_pa & 0xf000;
21989b68c96SAlexander Graf 			new_shared = (void*)shared;
22089b68c96SAlexander Graf 			memcpy(new_shared, old_shared, 0x1000);
22189b68c96SAlexander Graf 			vcpu->arch.shared = new_shared;
22289b68c96SAlexander Graf 		}
22389b68c96SAlexander Graf #endif
22489b68c96SAlexander Graf 
225b5904972SScott Wood 		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
2267508e16cSAlexander Graf 
227fdcf8bd7SStuart Yoder 		r = EV_SUCCESS;
2285fc87407SAlexander Graf 		break;
2295fc87407SAlexander Graf 	}
230fdcf8bd7SStuart Yoder 	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
231fdcf8bd7SStuart Yoder 		r = EV_SUCCESS;
232bf7ca4bdSAlexander Graf #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
2335fc87407SAlexander Graf 		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
2345fc87407SAlexander Graf #endif
2352a342ed5SAlexander Graf 
2362a342ed5SAlexander Graf 		/* Second return value is in r4 */
2372a342ed5SAlexander Graf 		break;
2389202e076SLiu Yu-B13201 	case EV_HCALL_TOKEN(EV_IDLE):
2399202e076SLiu Yu-B13201 		r = EV_SUCCESS;
24091b99ea7SSean Christopherson 		kvm_vcpu_halt(vcpu);
2419202e076SLiu Yu-B13201 		break;
2422a342ed5SAlexander Graf 	default:
243fdcf8bd7SStuart Yoder 		r = EV_UNIMPLEMENTED;
2442a342ed5SAlexander Graf 		break;
2452a342ed5SAlexander Graf 	}
2462a342ed5SAlexander Graf 
2477508e16cSAlexander Graf 	kvmppc_set_gpr(vcpu, 4, r2);
2487508e16cSAlexander Graf 
2492a342ed5SAlexander Graf 	return r;
2502a342ed5SAlexander Graf }
2512ba9f0d8SAneesh Kumar K.V EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
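/*
 * Guest-side view, as a hedged sketch (the register convention is inferred
 * from the handler above; the trap mechanism itself is subarch-specific and
 * lives outside this file): the guest places the hypercall token in r11 and
 * up to four parameters in r3-r6, then traps into the host.  The EV_* status
 * is written back to r3 by the exit handler that calls kvmppc_kvm_pv(), and
 * the second return value (r2 above) comes back in r4.
 *
 *	r11 = KVM_HCALL_TOKEN(KVM_HC_FEATURES);
 *	<trap to host>
 *	status   = r3;	// EV_SUCCESS or EV_UNIMPLEMENTED
 *	features = r4;	// e.g. the KVM_FEATURE_MAGIC_PAGE bit
 */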
252bbf45ba5SHollis Blanchard 
253af8f38b3SAlexander Graf int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
254af8f38b3SAlexander Graf {
255af8f38b3SAlexander Graf 	int r = false;
256af8f38b3SAlexander Graf 
257af8f38b3SAlexander Graf 	/* We have to know what CPU to virtualize */
258af8f38b3SAlexander Graf 	if (!vcpu->arch.pvr)
259af8f38b3SAlexander Graf 		goto out;
260af8f38b3SAlexander Graf 
261af8f38b3SAlexander Graf 	/* PAPR only works with book3s_64 */
262af8f38b3SAlexander Graf 	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
263af8f38b3SAlexander Graf 		goto out;
264af8f38b3SAlexander Graf 
265af8f38b3SAlexander Graf 	/* HV KVM can only do PAPR mode for now */
266a78b55d1SAneesh Kumar K.V 	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
267af8f38b3SAlexander Graf 		goto out;
268af8f38b3SAlexander Graf 
269d30f6e48SScott Wood #ifdef CONFIG_KVM_BOOKE_HV
270d30f6e48SScott Wood 	if (!cpu_has_feature(CPU_FTR_EMB_HV))
271d30f6e48SScott Wood 		goto out;
272d30f6e48SScott Wood #endif
273d30f6e48SScott Wood 
274af8f38b3SAlexander Graf 	r = true;
275af8f38b3SAlexander Graf 
276af8f38b3SAlexander Graf out:
277af8f38b3SAlexander Graf 	vcpu->arch.sane = r;
278af8f38b3SAlexander Graf 	return r ? 0 : -EINVAL;
279af8f38b3SAlexander Graf }
2802ba9f0d8SAneesh Kumar K.V EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
281af8f38b3SAlexander Graf 
2828c99d345STianjia Zhang int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
283bbf45ba5SHollis Blanchard {
284bbf45ba5SHollis Blanchard 	enum emulation_result er;
285bbf45ba5SHollis Blanchard 	int r;
286bbf45ba5SHollis Blanchard 
287d69614a2SAlexander Graf 	er = kvmppc_emulate_loadstore(vcpu);
288bbf45ba5SHollis Blanchard 	switch (er) {
289bbf45ba5SHollis Blanchard 	case EMULATE_DONE:
290bbf45ba5SHollis Blanchard 		/* Future optimization: only reload non-volatiles if they were
291bbf45ba5SHollis Blanchard 		 * actually modified. */
292bbf45ba5SHollis Blanchard 		r = RESUME_GUEST_NV;
293bbf45ba5SHollis Blanchard 		break;
29451f04726SMihai Caraman 	case EMULATE_AGAIN:
29551f04726SMihai Caraman 		r = RESUME_GUEST;
29651f04726SMihai Caraman 		break;
297bbf45ba5SHollis Blanchard 	case EMULATE_DO_MMIO:
2988c99d345STianjia Zhang 		vcpu->run->exit_reason = KVM_EXIT_MMIO;
299bbf45ba5SHollis Blanchard 		/* We must reload nonvolatiles because "update" load/store
300bbf45ba5SHollis Blanchard 		 * instructions modify register state. */
301bbf45ba5SHollis Blanchard 		/* Future optimization: only reload non-volatiles if they were
302bbf45ba5SHollis Blanchard 		 * actually modified. */
303bbf45ba5SHollis Blanchard 		r = RESUME_HOST_NV;
304bbf45ba5SHollis Blanchard 		break;
305bbf45ba5SHollis Blanchard 	case EMULATE_FAIL:
30651f04726SMihai Caraman 	{
307acf17878SPaul Mackerras 		ppc_inst_t last_inst;
30851f04726SMihai Caraman 
3098d0eff63SAlexander Graf 		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
310349fbfe9SFabiano Rosas 		kvm_debug_ratelimited("Guest access to device memory using unsupported instruction (opcode: %#08x)\n",
311acf17878SPaul Mackerras 				      ppc_inst_val(last_inst));
312c1c8a663SFabiano Rosas 
313c1c8a663SFabiano Rosas 		/*
314c1c8a663SFabiano Rosas 		 * Injecting a Data Storage here is a bit more
315c1c8a663SFabiano Rosas 		 * accurate since the instruction that caused the
316c1c8a663SFabiano Rosas 		 * access could still be a valid one.
317c1c8a663SFabiano Rosas 		 */
318c1c8a663SFabiano Rosas 		if (!IS_ENABLED(CONFIG_BOOKE)) {
319c1c8a663SFabiano Rosas 			ulong dsisr = DSISR_BADACCESS;
320c1c8a663SFabiano Rosas 
321c1c8a663SFabiano Rosas 			if (vcpu->mmio_is_write)
322c1c8a663SFabiano Rosas 				dsisr |= DSISR_ISSTORE;
323c1c8a663SFabiano Rosas 
3246cd5c1dbSNicholas Piggin 			kvmppc_core_queue_data_storage(vcpu,
3256cd5c1dbSNicholas Piggin 					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
326460ba21dSNicholas Piggin 					vcpu->arch.vaddr_accessed, dsisr);
327c1c8a663SFabiano Rosas 		} else {
328c1c8a663SFabiano Rosas 			/*
329c1c8a663SFabiano Rosas 			 * BookE does not send a SIGBUS on a bad
330c1c8a663SFabiano Rosas 			 * fault, so use a Program interrupt instead
331c1c8a663SFabiano Rosas 			 * to avoid a fault loop.
332c1c8a663SFabiano Rosas 			 */
333c1c8a663SFabiano Rosas 			kvmppc_core_queue_program(vcpu, 0);
334c1c8a663SFabiano Rosas 		}
335c1c8a663SFabiano Rosas 
336349fbfe9SFabiano Rosas 		r = RESUME_GUEST;
337bbf45ba5SHollis Blanchard 		break;
33851f04726SMihai Caraman 	}
339bbf45ba5SHollis Blanchard 	default:
3405a33169eSAlexander Graf 		WARN_ON(1);
3415a33169eSAlexander Graf 		r = RESUME_GUEST;
342bbf45ba5SHollis Blanchard 	}
343bbf45ba5SHollis Blanchard 
344bbf45ba5SHollis Blanchard 	return r;
345bbf45ba5SHollis Blanchard }
3462ba9f0d8SAneesh Kumar K.V EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
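/*
 * A note on the return codes used above: the _NV variants (RESUME_GUEST_NV,
 * RESUME_HOST_NV) additionally ask the exit path to reload the guest's
 * non-volatile register state, which is why they are chosen whenever the
 * emulated instruction may have modified those registers (this reading
 * follows the "reload non-volatiles" comments in the switch above).
 */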
347bbf45ba5SHollis Blanchard 
34835c4a733SAlexander Graf int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
34935c4a733SAlexander Graf 	      bool data)
35035c4a733SAlexander Graf {
351c12fb43cSAlexander Graf 	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
35235c4a733SAlexander Graf 	struct kvmppc_pte pte;
353cc6929ccSSuraj Jitindar Singh 	int r = -EINVAL;
35435c4a733SAlexander Graf 
35535c4a733SAlexander Graf 	vcpu->stat.st++;
35635c4a733SAlexander Graf 
357cc6929ccSSuraj Jitindar Singh 	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
358cc6929ccSSuraj Jitindar Singh 		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
359cc6929ccSSuraj Jitindar Singh 							    size);
360cc6929ccSSuraj Jitindar Singh 
361cc6929ccSSuraj Jitindar Singh 	if ((!r) || (r == -EAGAIN))
362cc6929ccSSuraj Jitindar Singh 		return r;
363cc6929ccSSuraj Jitindar Singh 
36435c4a733SAlexander Graf 	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
36535c4a733SAlexander Graf 			 XLATE_WRITE, &pte);
36635c4a733SAlexander Graf 	if (r < 0)
36735c4a733SAlexander Graf 		return r;
36835c4a733SAlexander Graf 
36935c4a733SAlexander Graf 	*eaddr = pte.raddr;
37035c4a733SAlexander Graf 
37135c4a733SAlexander Graf 	if (!pte.may_write)
37235c4a733SAlexander Graf 		return -EPERM;
37335c4a733SAlexander Graf 
374c12fb43cSAlexander Graf 	/* Magic page override */
375c12fb43cSAlexander Graf 	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
376c12fb43cSAlexander Graf 	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
377c12fb43cSAlexander Graf 	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
378c12fb43cSAlexander Graf 		void *magic = vcpu->arch.shared;
379c12fb43cSAlexander Graf 		magic += pte.eaddr & 0xfff;
380c12fb43cSAlexander Graf 		memcpy(magic, ptr, size);
381c12fb43cSAlexander Graf 		return EMULATE_DONE;
382c12fb43cSAlexander Graf 	}
383c12fb43cSAlexander Graf 
38435c4a733SAlexander Graf 	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
38535c4a733SAlexander Graf 		return EMULATE_DO_MMIO;
38635c4a733SAlexander Graf 
38735c4a733SAlexander Graf 	return EMULATE_DONE;
38835c4a733SAlexander Graf }
38935c4a733SAlexander Graf EXPORT_SYMBOL_GPL(kvmppc_st);
39035c4a733SAlexander Graf 
39135c4a733SAlexander Graf int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
39235c4a733SAlexander Graf 		      bool data)
39335c4a733SAlexander Graf {
394c12fb43cSAlexander Graf 	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
39535c4a733SAlexander Graf 	struct kvmppc_pte pte;
396cc6929ccSSuraj Jitindar Singh 	int rc = -EINVAL;
39735c4a733SAlexander Graf 
39835c4a733SAlexander Graf 	vcpu->stat.ld++;
39935c4a733SAlexander Graf 
400cc6929ccSSuraj Jitindar Singh 	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
401cc6929ccSSuraj Jitindar Singh 		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
402cc6929ccSSuraj Jitindar Singh 							      size);
403cc6929ccSSuraj Jitindar Singh 
404cc6929ccSSuraj Jitindar Singh 	if ((!rc) || (rc == -EAGAIN))
405cc6929ccSSuraj Jitindar Singh 		return rc;
406cc6929ccSSuraj Jitindar Singh 
40735c4a733SAlexander Graf 	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
40835c4a733SAlexander Graf 			  XLATE_READ, &pte);
40935c4a733SAlexander Graf 	if (rc)
41035c4a733SAlexander Graf 		return rc;
41135c4a733SAlexander Graf 
41235c4a733SAlexander Graf 	*eaddr = pte.raddr;
41335c4a733SAlexander Graf 
41435c4a733SAlexander Graf 	if (!pte.may_read)
41535c4a733SAlexander Graf 		return -EPERM;
41635c4a733SAlexander Graf 
41735c4a733SAlexander Graf 	if (!data && !pte.may_execute)
41835c4a733SAlexander Graf 		return -ENOEXEC;
41935c4a733SAlexander Graf 
420c12fb43cSAlexander Graf 	/* Magic page override */
421c12fb43cSAlexander Graf 	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
422c12fb43cSAlexander Graf 	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
423c12fb43cSAlexander Graf 	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
424c12fb43cSAlexander Graf 		void *magic = vcpu->arch.shared;
425c12fb43cSAlexander Graf 		magic += pte.eaddr & 0xfff;
426c12fb43cSAlexander Graf 		memcpy(ptr, magic, size);
427c12fb43cSAlexander Graf 		return EMULATE_DONE;
428c12fb43cSAlexander Graf 	}
429c12fb43cSAlexander Graf 
4302031f287SSean Christopherson 	kvm_vcpu_srcu_read_lock(vcpu);
4311508c22fSAlexey Kardashevskiy 	rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
4322031f287SSean Christopherson 	kvm_vcpu_srcu_read_unlock(vcpu);
4331508c22fSAlexey Kardashevskiy 	if (rc)
434c45c5514SAlexander Graf 		return EMULATE_DO_MMIO;
43535c4a733SAlexander Graf 
43635c4a733SAlexander Graf 	return EMULATE_DONE;
43735c4a733SAlexander Graf }
43835c4a733SAlexander Graf EXPORT_SYMBOL_GPL(kvmppc_ld);
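/*
 * Return-value convention shared by kvmppc_st() and kvmppc_ld() above, as a
 * descriptive summary of the code: a negative errno (-EPERM, -ENOEXEC, or an
 * error from kvmppc_xlate() or the kvm_ops eaddr hooks) means the access
 * failed outright; EMULATE_DO_MMIO means the translated address is not backed
 * by guest memory and must be completed as MMIO; EMULATE_DONE means the
 * load/store was satisfied directly (including the magic-page override).
 */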
43935c4a733SAlexander Graf 
440e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
441bbf45ba5SHollis Blanchard {
442cbbc58d4SAneesh Kumar K.V 	struct kvmppc_ops *kvm_ops = NULL;
4434feb74aaSFabiano Rosas 	int r;
4444feb74aaSFabiano Rosas 
445cbbc58d4SAneesh Kumar K.V 	/*
446cbbc58d4SAneesh Kumar K.V 	 * if we have both HV and PR enabled, default is HV
447cbbc58d4SAneesh Kumar K.V 	 */
448cbbc58d4SAneesh Kumar K.V 	if (type == 0) {
449cbbc58d4SAneesh Kumar K.V 		if (kvmppc_hv_ops)
450cbbc58d4SAneesh Kumar K.V 			kvm_ops = kvmppc_hv_ops;
451cbbc58d4SAneesh Kumar K.V 		else
452cbbc58d4SAneesh Kumar K.V 			kvm_ops = kvmppc_pr_ops;
453cbbc58d4SAneesh Kumar K.V 		if (!kvm_ops)
454cbbc58d4SAneesh Kumar K.V 			goto err_out;
455cbbc58d4SAneesh Kumar K.V 	} else	if (type == KVM_VM_PPC_HV) {
456cbbc58d4SAneesh Kumar K.V 		if (!kvmppc_hv_ops)
457cbbc58d4SAneesh Kumar K.V 			goto err_out;
458cbbc58d4SAneesh Kumar K.V 		kvm_ops = kvmppc_hv_ops;
459cbbc58d4SAneesh Kumar K.V 	} else if (type == KVM_VM_PPC_PR) {
460cbbc58d4SAneesh Kumar K.V 		if (!kvmppc_pr_ops)
461cbbc58d4SAneesh Kumar K.V 			goto err_out;
462cbbc58d4SAneesh Kumar K.V 		kvm_ops = kvmppc_pr_ops;
463cbbc58d4SAneesh Kumar K.V 	} else
464cbbc58d4SAneesh Kumar K.V 		goto err_out;
465e08b9637SCarsten Otte 
4664feb74aaSFabiano Rosas 	if (!try_module_get(kvm_ops->owner))
467cbbc58d4SAneesh Kumar K.V 		return -ENOENT;
468cbbc58d4SAneesh Kumar K.V 
469cbbc58d4SAneesh Kumar K.V 	kvm->arch.kvm_ops = kvm_ops;
4704feb74aaSFabiano Rosas 	r = kvmppc_core_init_vm(kvm);
4714feb74aaSFabiano Rosas 	if (r)
4724feb74aaSFabiano Rosas 		module_put(kvm_ops->owner);
4734feb74aaSFabiano Rosas 	return r;
474cbbc58d4SAneesh Kumar K.V err_out:
475cbbc58d4SAneesh Kumar K.V 	return -EINVAL;
476bbf45ba5SHollis Blanchard }
477bbf45ba5SHollis Blanchard 
478d89f5effSJan Kiszka void kvm_arch_destroy_vm(struct kvm *kvm)
479bbf45ba5SHollis Blanchard {
480e17769ebSSuresh E. Warrier #ifdef CONFIG_KVM_XICS
481e17769ebSSuresh E. Warrier 	/*
482e17769ebSSuresh E. Warrier 	 * We call kick_all_cpus_sync() to ensure that all
483e17769ebSSuresh E. Warrier 	 * CPUs have executed any pending IPIs before we
484e17769ebSSuresh E. Warrier 	 * continue and free VCPUs structures below.
485e17769ebSSuresh E. Warrier 	 */
486e17769ebSSuresh E. Warrier 	if (is_kvmppc_hv_enabled(kvm))
487e17769ebSSuresh E. Warrier 		kick_all_cpus_sync();
488e17769ebSSuresh E. Warrier #endif
489e17769ebSSuresh E. Warrier 
49027592ae8SMarc Zyngier 	kvm_destroy_vcpus(kvm);
491988a2caeSGleb Natapov 
492988a2caeSGleb Natapov 	mutex_lock(&kvm->lock);
493f9e0554dSPaul Mackerras 
494f9e0554dSPaul Mackerras 	kvmppc_core_destroy_vm(kvm);
495f9e0554dSPaul Mackerras 
496988a2caeSGleb Natapov 	mutex_unlock(&kvm->lock);
497cbbc58d4SAneesh Kumar K.V 
498cbbc58d4SAneesh Kumar K.V 	/* drop the module reference */
499cbbc58d4SAneesh Kumar K.V 	module_put(kvm->arch.kvm_ops->owner);
500bbf45ba5SHollis Blanchard }
501bbf45ba5SHollis Blanchard 
502784aa3d7SAlexander Graf int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
503bbf45ba5SHollis Blanchard {
504bbf45ba5SHollis Blanchard 	int r;
5057a58777aSAlexander Graf 	/* Assume we're using HV mode when the HV module is loaded */
506cbbc58d4SAneesh Kumar K.V 	int hv_enabled = kvmppc_hv_ops ? 1 : 0;
507bbf45ba5SHollis Blanchard 
5087a58777aSAlexander Graf 	if (kvm) {
5097a58777aSAlexander Graf 		/*
5107a58777aSAlexander Graf 		 * Hooray - we know which VM type we're running on. Depend on
5117a58777aSAlexander Graf 		 * that rather than the guess above.
5127a58777aSAlexander Graf 		 */
5137a58777aSAlexander Graf 		hv_enabled = is_kvmppc_hv_enabled(kvm);
5147a58777aSAlexander Graf 	}
5157a58777aSAlexander Graf 
516bbf45ba5SHollis Blanchard 	switch (ext) {
5175ce941eeSScott Wood #ifdef CONFIG_BOOKE
5185ce941eeSScott Wood 	case KVM_CAP_PPC_BOOKE_SREGS:
519f61c94bbSBharat Bhushan 	case KVM_CAP_PPC_BOOKE_WATCHDOG:
5201c810636SAlexander Graf 	case KVM_CAP_PPC_EPR:
5215ce941eeSScott Wood #else
522e15a1137SAlexander Graf 	case KVM_CAP_PPC_SEGSTATE:
5231022fc3dSAlexander Graf 	case KVM_CAP_PPC_HIOR:
524930b412aSAlexander Graf 	case KVM_CAP_PPC_PAPR:
5255ce941eeSScott Wood #endif
52618978768SAlexander Graf 	case KVM_CAP_PPC_UNSET_IRQ:
5277b4203e8SAlexander Graf 	case KVM_CAP_PPC_IRQ_LEVEL:
52871fbfd5fSAlexander Graf 	case KVM_CAP_ENABLE_CAP:
529e24ed81fSAlexander Graf 	case KVM_CAP_ONE_REG:
5300e673fb6SAlexander Graf 	case KVM_CAP_IOEVENTFD:
5315df554adSScott Wood 	case KVM_CAP_DEVICE_CTRL:
532460df4c1SPaolo Bonzini 	case KVM_CAP_IMMEDIATE_EXIT:
533b9b2782cSPeter Xu 	case KVM_CAP_SET_GUEST_DEBUG:
534de56a948SPaul Mackerras 		r = 1;
535de56a948SPaul Mackerras 		break;
5361a9167a2SFabiano Rosas 	case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
537de56a948SPaul Mackerras 	case KVM_CAP_PPC_PAIRED_SINGLES:
538ad0a048bSAlexander Graf 	case KVM_CAP_PPC_OSI:
53915711e9cSAlexander Graf 	case KVM_CAP_PPC_GET_PVINFO:
540bf7ca4bdSAlexander Graf #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
541dc83b8bcSScott Wood 	case KVM_CAP_SW_TLB:
542dc83b8bcSScott Wood #endif
543699cc876SAneesh Kumar K.V 		/* We support this only for PR */
544cbbc58d4SAneesh Kumar K.V 		r = !hv_enabled;
545e15a1137SAlexander Graf 		break;
546699cc876SAneesh Kumar K.V #ifdef CONFIG_KVM_MPIC
547699cc876SAneesh Kumar K.V 	case KVM_CAP_IRQ_MPIC:
548699cc876SAneesh Kumar K.V 		r = 1;
549699cc876SAneesh Kumar K.V 		break;
550699cc876SAneesh Kumar K.V #endif
551699cc876SAneesh Kumar K.V 
552f31e65e1SBenjamin Herrenschmidt #ifdef CONFIG_PPC_BOOK3S_64
55354738c09SDavid Gibson 	case KVM_CAP_SPAPR_TCE:
55458ded420SAlexey Kardashevskiy 	case KVM_CAP_SPAPR_TCE_64:
555693ac10aSSuraj Jitindar Singh 		r = 1;
556693ac10aSSuraj Jitindar Singh 		break;
557121f80baSAlexey Kardashevskiy 	case KVM_CAP_SPAPR_TCE_VFIO:
558693ac10aSSuraj Jitindar Singh 		r = !!cpu_has_feature(CPU_FTR_HVMODE);
559693ac10aSSuraj Jitindar Singh 		break;
5608e591cb7SMichael Ellerman 	case KVM_CAP_PPC_RTAS:
561f2e91042SAlexander Graf 	case KVM_CAP_PPC_FIXUP_HCALL:
562699a0ea0SPaul Mackerras 	case KVM_CAP_PPC_ENABLE_HCALL:
5635975a2e0SPaul Mackerras #ifdef CONFIG_KVM_XICS
5645975a2e0SPaul Mackerras 	case KVM_CAP_IRQ_XICS:
5655975a2e0SPaul Mackerras #endif
5663214d01fSPaul Mackerras 	case KVM_CAP_PPC_GET_CPU_CHAR:
56754738c09SDavid Gibson 		r = 1;
56854738c09SDavid Gibson 		break;
569eacc56bbSCédric Le Goater #ifdef CONFIG_KVM_XIVE
570eacc56bbSCédric Le Goater 	case KVM_CAP_PPC_IRQ_XIVE:
571eacc56bbSCédric Le Goater 		/*
5723fab2d10SCédric Le Goater 		 * We need XIVE to be enabled on the platform (implies
5733fab2d10SCédric Le Goater 		 * a POWER9 processor) and the PowerNV platform, as
5743fab2d10SCédric Le Goater 		 * nested is not yet supported.
575eacc56bbSCédric Le Goater 		 */
5762ad7a27dSPaul Mackerras 		r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
5772ad7a27dSPaul Mackerras 			kvmppc_xive_native_supported();
578eacc56bbSCédric Le Goater 		break;
579eacc56bbSCédric Le Goater #endif
580a8acaeceSDavid Gibson 
58152882b9cSAlexey Kardashevskiy #ifdef CONFIG_HAVE_KVM_IRQFD
58252882b9cSAlexey Kardashevskiy 	case KVM_CAP_IRQFD_RESAMPLE:
58352882b9cSAlexey Kardashevskiy 		r = !xive_enabled();
58452882b9cSAlexey Kardashevskiy 		break;
58552882b9cSAlexey Kardashevskiy #endif
58652882b9cSAlexey Kardashevskiy 
587a8acaeceSDavid Gibson 	case KVM_CAP_PPC_ALLOC_HTAB:
588a8acaeceSDavid Gibson 		r = hv_enabled;
589a8acaeceSDavid Gibson 		break;
590f31e65e1SBenjamin Herrenschmidt #endif /* CONFIG_PPC_BOOK3S_64 */
591699cc876SAneesh Kumar K.V #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
592371fefd6SPaul Mackerras 	case KVM_CAP_PPC_SMT:
593699cc876SAneesh Kumar K.V 		r = 0;
59457900694SPaul Mackerras 		if (kvm) {
59557900694SPaul Mackerras 			if (kvm->arch.emul_smt_mode > 1)
59657900694SPaul Mackerras 				r = kvm->arch.emul_smt_mode;
59757900694SPaul Mackerras 			else
5983c313524SPaul Mackerras 				r = kvm->arch.smt_mode;
59957900694SPaul Mackerras 		} else if (hv_enabled) {
60045c940baSPaul Mackerras 			if (cpu_has_feature(CPU_FTR_ARCH_300))
60145c940baSPaul Mackerras 				r = 1;
60245c940baSPaul Mackerras 			else
60345c940baSPaul Mackerras 				r = threads_per_subcore;
60445c940baSPaul Mackerras 		}
605371fefd6SPaul Mackerras 		break;
6062ed4f9ddSPaul Mackerras 	case KVM_CAP_PPC_SMT_POSSIBLE:
6072ed4f9ddSPaul Mackerras 		r = 1;
6082ed4f9ddSPaul Mackerras 		if (hv_enabled) {
6092ed4f9ddSPaul Mackerras 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
6102ed4f9ddSPaul Mackerras 				r = ((threads_per_subcore << 1) - 1);
6112ed4f9ddSPaul Mackerras 			else
6122ed4f9ddSPaul Mackerras 				/* P9 can emulate dbells, so allow any mode */
6132ed4f9ddSPaul Mackerras 				r = 8 | 4 | 2 | 1;
6142ed4f9ddSPaul Mackerras 		}
6152ed4f9ddSPaul Mackerras 		break;
616aa04b4ccSPaul Mackerras 	case KVM_CAP_PPC_RMA:
617c17b98cfSPaul Mackerras 		r = 0;
618aa04b4ccSPaul Mackerras 		break;
619e928e9cbSMichael Ellerman 	case KVM_CAP_PPC_HWRNG:
620e928e9cbSMichael Ellerman 		r = kvmppc_hwrng_present();
621e928e9cbSMichael Ellerman 		break;
622c9270132SPaul Mackerras 	case KVM_CAP_PPC_MMU_RADIX:
6238cf4ecc0SPaul Mackerras 		r = !!(hv_enabled && radix_enabled());
624c9270132SPaul Mackerras 		break;
625c9270132SPaul Mackerras 	case KVM_CAP_PPC_MMU_HASH_V3:
626a722076eSFabiano Rosas 		r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible &&
627a722076eSFabiano Rosas 		       kvmppc_hv_ops->hash_v3_possible());
628c9270132SPaul Mackerras 		break;
629aa069a99SPaul Mackerras 	case KVM_CAP_PPC_NESTED_HV:
630aa069a99SPaul Mackerras 		r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
631aa069a99SPaul Mackerras 		       !kvmppc_hv_ops->enable_nested(NULL));
632aa069a99SPaul Mackerras 		break;
63354738c09SDavid Gibson #endif
634f4800b1fSAlexander Graf 	case KVM_CAP_SYNC_MMU:
635699cc876SAneesh Kumar K.V #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
636c17b98cfSPaul Mackerras 		r = hv_enabled;
637f4800b1fSAlexander Graf #elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
638f4800b1fSAlexander Graf 		r = 1;
639f4800b1fSAlexander Graf #else
640f4800b1fSAlexander Graf 		r = 0;
641a2932923SPaul Mackerras #endif
642699cc876SAneesh Kumar K.V 		break;
643699cc876SAneesh Kumar K.V #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
644a2932923SPaul Mackerras 	case KVM_CAP_PPC_HTAB_FD:
645cbbc58d4SAneesh Kumar K.V 		r = hv_enabled;
646a2932923SPaul Mackerras 		break;
647f4800b1fSAlexander Graf #endif
648b5434032SMatt Evans 	case KVM_CAP_NR_VCPUS:
649b5434032SMatt Evans 		/*
650b5434032SMatt Evans 		 * Recommending a number of CPUs is somewhat arbitrary; we
651b5434032SMatt Evans 		 * return the number of present CPUs for -HV (since a host
652b5434032SMatt Evans 		 * will have secondary threads "offline"), and for other KVM
653b5434032SMatt Evans 		 * implementations just count online CPUs.
654b5434032SMatt Evans 		 */
655cbbc58d4SAneesh Kumar K.V 		if (hv_enabled)
656b7915d55SVitaly Kuznetsov 			r = min_t(unsigned int, num_present_cpus(), KVM_MAX_VCPUS);
657699cc876SAneesh Kumar K.V 		else
658b7915d55SVitaly Kuznetsov 			r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
659b5434032SMatt Evans 		break;
660b5434032SMatt Evans 	case KVM_CAP_MAX_VCPUS:
661b5434032SMatt Evans 		r = KVM_MAX_VCPUS;
662b5434032SMatt Evans 		break;
663a86cb413SThomas Huth 	case KVM_CAP_MAX_VCPU_ID:
664a1c42ddeSJuergen Gross 		r = KVM_MAX_VCPU_IDS;
665a86cb413SThomas Huth 		break;
6665b74716eSBenjamin Herrenschmidt #ifdef CONFIG_PPC_BOOK3S_64
6675b74716eSBenjamin Herrenschmidt 	case KVM_CAP_PPC_GET_SMMU_INFO:
6685b74716eSBenjamin Herrenschmidt 		r = 1;
6695b74716eSBenjamin Herrenschmidt 		break;
670d3695aa4SAlexey Kardashevskiy 	case KVM_CAP_SPAPR_MULTITCE:
671d3695aa4SAlexey Kardashevskiy 		r = 1;
672d3695aa4SAlexey Kardashevskiy 		break;
673050f2339SDavid Gibson 	case KVM_CAP_SPAPR_RESIZE_HPT:
674790a9df5SDavid Gibson 		r = !!hv_enabled;
675050f2339SDavid Gibson 		break;
6765b74716eSBenjamin Herrenschmidt #endif
677134764edSAravinda Prasad #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
678134764edSAravinda Prasad 	case KVM_CAP_PPC_FWNMI:
679134764edSAravinda Prasad 		r = hv_enabled;
680134764edSAravinda Prasad 		break;
681134764edSAravinda Prasad #endif
6824bb3c7a0SPaul Mackerras #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
68323528bb2SSam Bobroff 	case KVM_CAP_PPC_HTM:
684d234d68eSSimon Guo 		r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
685d234d68eSSimon Guo 		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
68623528bb2SSam Bobroff 		break;
6874bb3c7a0SPaul Mackerras #endif
6889a5788c6SPaul Mackerras #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
6899a5788c6SPaul Mackerras 	case KVM_CAP_PPC_SECURE_GUEST:
6909a5788c6SPaul Mackerras 		r = hv_enabled && kvmppc_hv_ops->enable_svm &&
6919a5788c6SPaul Mackerras 			!kvmppc_hv_ops->enable_svm(NULL);
6929a5788c6SPaul Mackerras 		break;
693d9a47edaSRavi Bangoria 	case KVM_CAP_PPC_DAWR1:
694d9a47edaSRavi Bangoria 		r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 &&
695d9a47edaSRavi Bangoria 		       !kvmppc_hv_ops->enable_dawr1(NULL));
696d9a47edaSRavi Bangoria 		break;
697b87cc116SBharata B Rao 	case KVM_CAP_PPC_RPT_INVALIDATE:
698b87cc116SBharata B Rao 		r = 1;
699b87cc116SBharata B Rao 		break;
7009a5788c6SPaul Mackerras #endif
701f771b557SNicholas Piggin 	case KVM_CAP_PPC_AIL_MODE_3:
702f771b557SNicholas Piggin 		r = 0;
703f771b557SNicholas Piggin 		/*
704f771b557SNicholas Piggin 		 * KVM PR, POWER7, and some POWER9s don't support AIL=3 mode.
705f771b557SNicholas Piggin 		 * The POWER9s can support it if the guest runs in hash mode,
706f771b557SNicholas Piggin 		 * but QEMU doesn't necessarily query the capability in time.
707f771b557SNicholas Piggin 		 */
708f771b557SNicholas Piggin 		if (hv_enabled) {
709f771b557SNicholas Piggin 			if (kvmhv_on_pseries()) {
710f771b557SNicholas Piggin 				if (pseries_reloc_on_exception())
711f771b557SNicholas Piggin 					r = 1;
712f771b557SNicholas Piggin 			} else if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
713f771b557SNicholas Piggin 				  !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
714f771b557SNicholas Piggin 				r = 1;
715f771b557SNicholas Piggin 			}
716f771b557SNicholas Piggin 		}
717f771b557SNicholas Piggin 		break;
718bbf45ba5SHollis Blanchard 	default:
719bbf45ba5SHollis Blanchard 		r = 0;
720bbf45ba5SHollis Blanchard 		break;
721bbf45ba5SHollis Blanchard 	}
722bbf45ba5SHollis Blanchard 	return r;
723bbf45ba5SHollis Blanchard 
724bbf45ba5SHollis Blanchard }
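/*
 * Hypothetical userspace sketch (not part of this file) of how the answers
 * computed above are queried.  KVM_CHECK_EXTENSION can be issued on the
 * /dev/kvm fd (kvm == NULL here, so hv_enabled is guessed from the loaded
 * modules) or on a VM fd (kvm != NULL, so the per-VM answer applies):
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int vm_fd  = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_PPC_HV);
 *	int nvcpus = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
 */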
725bbf45ba5SHollis Blanchard 
726bbf45ba5SHollis Blanchard long kvm_arch_dev_ioctl(struct file *filp,
727bbf45ba5SHollis Blanchard                         unsigned int ioctl, unsigned long arg)
728bbf45ba5SHollis Blanchard {
729bbf45ba5SHollis Blanchard 	return -EINVAL;
730bbf45ba5SHollis Blanchard }
731bbf45ba5SHollis Blanchard 
732e96c81eeSSean Christopherson void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
733db3fe4ebSTakuya Yoshikawa {
734e96c81eeSSean Christopherson 	kvmppc_core_free_memslot(kvm, slot);
735db3fe4ebSTakuya Yoshikawa }
736db3fe4ebSTakuya Yoshikawa 
737f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
738537a17b3SSean Christopherson 				   const struct kvm_memory_slot *old,
739537a17b3SSean Christopherson 				   struct kvm_memory_slot *new,
7407b6195a9STakuya Yoshikawa 				   enum kvm_mr_change change)
741bbf45ba5SHollis Blanchard {
742eaaaed13SSean Christopherson 	return kvmppc_core_prepare_memory_region(kvm, old, new, change);
743bbf45ba5SHollis Blanchard }
744bbf45ba5SHollis Blanchard 
745f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
7469d4c197cSSean Christopherson 				   struct kvm_memory_slot *old,
747f36f3f28SPaolo Bonzini 				   const struct kvm_memory_slot *new,
7488482644aSTakuya Yoshikawa 				   enum kvm_mr_change change)
749f7784b8eSMarcelo Tosatti {
750eaaaed13SSean Christopherson 	kvmppc_core_commit_memory_region(kvm, old, new, change);
751f7784b8eSMarcelo Tosatti }
752f7784b8eSMarcelo Tosatti 
7532df72e9bSMarcelo Tosatti void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
7542df72e9bSMarcelo Tosatti 				   struct kvm_memory_slot *slot)
75534d4cb8fSMarcelo Tosatti {
756dfe49dbdSPaul Mackerras 	kvmppc_core_flush_memslot(kvm, slot);
75734d4cb8fSMarcelo Tosatti }
75834d4cb8fSMarcelo Tosatti 
759897cc38eSSean Christopherson int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
760897cc38eSSean Christopherson {
761897cc38eSSean Christopherson 	return 0;
762897cc38eSSean Christopherson }
763897cc38eSSean Christopherson 
76474ce2e60SSean Christopherson static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
76574ce2e60SSean Christopherson {
76674ce2e60SSean Christopherson 	struct kvm_vcpu *vcpu;
76774ce2e60SSean Christopherson 
76874ce2e60SSean Christopherson 	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
76974ce2e60SSean Christopherson 	kvmppc_decrementer_func(vcpu);
77074ce2e60SSean Christopherson 
77174ce2e60SSean Christopherson 	return HRTIMER_NORESTART;
77274ce2e60SSean Christopherson }
77374ce2e60SSean Christopherson 
774e529ef66SSean Christopherson int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
775bbf45ba5SHollis Blanchard {
776c50bfbdcSSean Christopherson 	int err;
777c50bfbdcSSean Christopherson 
77874ce2e60SSean Christopherson 	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
77974ce2e60SSean Christopherson 	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
78074ce2e60SSean Christopherson 
78174ce2e60SSean Christopherson #ifdef CONFIG_KVM_EXIT_TIMING
78274ce2e60SSean Christopherson 	mutex_init(&vcpu->arch.exit_timing_lock);
78374ce2e60SSean Christopherson #endif
78474ce2e60SSean Christopherson 	err = kvmppc_subarch_vcpu_init(vcpu);
785ff030fdfSSean Christopherson 	if (err)
786e529ef66SSean Christopherson 		return err;
787ff030fdfSSean Christopherson 
78874ce2e60SSean Christopherson 	err = kvmppc_core_vcpu_create(vcpu);
78974ce2e60SSean Christopherson 	if (err)
79074ce2e60SSean Christopherson 		goto out_vcpu_uninit;
79174ce2e60SSean Christopherson 
792510958e9SSean Christopherson 	rcuwait_init(&vcpu->arch.wait);
793510958e9SSean Christopherson 	vcpu->arch.waitp = &vcpu->arch.wait;
794e529ef66SSean Christopherson 	return 0;
79574ce2e60SSean Christopherson 
79674ce2e60SSean Christopherson out_vcpu_uninit:
79774ce2e60SSean Christopherson 	kvmppc_subarch_vcpu_uninit(vcpu);
79874ce2e60SSean Christopherson 	return err;
799bbf45ba5SHollis Blanchard }
800bbf45ba5SHollis Blanchard 
80131928aa5SDominik Dingel void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
80242897d86SMarcelo Tosatti {
80342897d86SMarcelo Tosatti }
80442897d86SMarcelo Tosatti 
805d5279f3aSSean Christopherson void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
806bbf45ba5SHollis Blanchard {
807a595405dSAlexander Graf 	/* Make sure we're not using the vcpu anymore */
808a595405dSAlexander Graf 	hrtimer_cancel(&vcpu->arch.dec_timer);
809a595405dSAlexander Graf 
810eb1e4f43SScott Wood 	switch (vcpu->arch.irq_type) {
811eb1e4f43SScott Wood 	case KVMPPC_IRQ_MPIC:
812eb1e4f43SScott Wood 		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
813eb1e4f43SScott Wood 		break;
814bc5ad3f3SBenjamin Herrenschmidt 	case KVMPPC_IRQ_XICS:
81503f95332SPaul Mackerras 		if (xics_on_xive())
8165af50993SBenjamin Herrenschmidt 			kvmppc_xive_cleanup_vcpu(vcpu);
8175af50993SBenjamin Herrenschmidt 		else
818bc5ad3f3SBenjamin Herrenschmidt 			kvmppc_xics_free_icp(vcpu);
819bc5ad3f3SBenjamin Herrenschmidt 		break;
820eacc56bbSCédric Le Goater 	case KVMPPC_IRQ_XIVE:
821eacc56bbSCédric Le Goater 		kvmppc_xive_native_cleanup_vcpu(vcpu);
822eacc56bbSCédric Le Goater 		break;
823eb1e4f43SScott Wood 	}
824eb1e4f43SScott Wood 
825db93f574SHollis Blanchard 	kvmppc_core_vcpu_free(vcpu);
82674ce2e60SSean Christopherson 
82774ce2e60SSean Christopherson 	kvmppc_subarch_vcpu_uninit(vcpu);
828bbf45ba5SHollis Blanchard }
829bbf45ba5SHollis Blanchard 
830bbf45ba5SHollis Blanchard int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
831bbf45ba5SHollis Blanchard {
8329dd921cfSHollis Blanchard 	return kvmppc_core_pending_dec(vcpu);
833bbf45ba5SHollis Blanchard }
834bbf45ba5SHollis Blanchard 
835bbf45ba5SHollis Blanchard void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
836bbf45ba5SHollis Blanchard {
837eab17672SScott Wood #ifdef CONFIG_BOOKE
838eab17672SScott Wood 	/*
839eab17672SScott Wood 	 * vrsave (formerly usprg0) isn't used by Linux, but may
840eab17672SScott Wood 	 * be used by the guest.
841eab17672SScott Wood 	 *
842eab17672SScott Wood 	 * On non-booke this is associated with Altivec and
843eab17672SScott Wood 	 * is handled by code in book3s.c.
844eab17672SScott Wood 	 */
845eab17672SScott Wood 	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
846eab17672SScott Wood #endif
8479dd921cfSHollis Blanchard 	kvmppc_core_vcpu_load(vcpu, cpu);
848bbf45ba5SHollis Blanchard }
849bbf45ba5SHollis Blanchard 
850bbf45ba5SHollis Blanchard void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
851bbf45ba5SHollis Blanchard {
8529dd921cfSHollis Blanchard 	kvmppc_core_vcpu_put(vcpu);
853eab17672SScott Wood #ifdef CONFIG_BOOKE
854eab17672SScott Wood 	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
855eab17672SScott Wood #endif
856bbf45ba5SHollis Blanchard }
857bbf45ba5SHollis Blanchard 
8589576730dSSuresh Warrier /*
8599576730dSSuresh Warrier  * irq_bypass_add_producer and irq_bypass_del_producer are only
8609576730dSSuresh Warrier  * useful if the architecture supports PCI passthrough.
8619576730dSSuresh Warrier  * irq_bypass_stop and irq_bypass_start are not needed and so
8629576730dSSuresh Warrier  * kvm_ops are not defined for them.
8639576730dSSuresh Warrier  */
8649576730dSSuresh Warrier bool kvm_arch_has_irq_bypass(void)
8659576730dSSuresh Warrier {
8669576730dSSuresh Warrier 	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
8679576730dSSuresh Warrier 		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
8689576730dSSuresh Warrier }
8699576730dSSuresh Warrier 
8709576730dSSuresh Warrier int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
8719576730dSSuresh Warrier 				     struct irq_bypass_producer *prod)
8729576730dSSuresh Warrier {
8739576730dSSuresh Warrier 	struct kvm_kernel_irqfd *irqfd =
8749576730dSSuresh Warrier 		container_of(cons, struct kvm_kernel_irqfd, consumer);
8759576730dSSuresh Warrier 	struct kvm *kvm = irqfd->kvm;
8769576730dSSuresh Warrier 
8779576730dSSuresh Warrier 	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
8789576730dSSuresh Warrier 		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);
8799576730dSSuresh Warrier 
8809576730dSSuresh Warrier 	return 0;
8819576730dSSuresh Warrier }
8829576730dSSuresh Warrier 
8839576730dSSuresh Warrier void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
8849576730dSSuresh Warrier 				      struct irq_bypass_producer *prod)
8859576730dSSuresh Warrier {
8869576730dSSuresh Warrier 	struct kvm_kernel_irqfd *irqfd =
8879576730dSSuresh Warrier 		container_of(cons, struct kvm_kernel_irqfd, consumer);
8889576730dSSuresh Warrier 	struct kvm *kvm = irqfd->kvm;
8899576730dSSuresh Warrier 
8909576730dSSuresh Warrier 	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
8919576730dSSuresh Warrier 		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
8929576730dSSuresh Warrier }
8939576730dSSuresh Warrier 
8946f63e81bSBin Lu #ifdef CONFIG_VSX
8956f63e81bSBin Lu static inline int kvmppc_get_vsr_dword_offset(int index)
8966f63e81bSBin Lu {
8976f63e81bSBin Lu 	int offset;
8986f63e81bSBin Lu 
8996f63e81bSBin Lu 	if ((index != 0) && (index != 1))
9006f63e81bSBin Lu 		return -1;
9016f63e81bSBin Lu 
9026f63e81bSBin Lu #ifdef __BIG_ENDIAN
9036f63e81bSBin Lu 	offset =  index;
9046f63e81bSBin Lu #else
9056f63e81bSBin Lu 	offset = 1 - index;
9066f63e81bSBin Lu #endif
9076f63e81bSBin Lu 
9086f63e81bSBin Lu 	return offset;
9096f63e81bSBin Lu }
9106f63e81bSBin Lu 
9116f63e81bSBin Lu static inline int kvmppc_get_vsr_word_offset(int index)
9126f63e81bSBin Lu {
9136f63e81bSBin Lu 	int offset;
9146f63e81bSBin Lu 
9156f63e81bSBin Lu 	if ((index > 3) || (index < 0))
9166f63e81bSBin Lu 		return -1;
9176f63e81bSBin Lu 
9186f63e81bSBin Lu #ifdef __BIG_ENDIAN
9196f63e81bSBin Lu 	offset = index;
9206f63e81bSBin Lu #else
9216f63e81bSBin Lu 	offset = 3 - index;
9226f63e81bSBin Lu #endif
9236f63e81bSBin Lu 	return offset;
9246f63e81bSBin Lu }
9256f63e81bSBin Lu 
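/*
 * Worked example for the two offset helpers above (a descriptive note derived
 * from the code): a VSR is viewed as an array of two doublewords or four
 * words.  On a big-endian host the guest's element index maps straight to the
 * array offset, while on little-endian it is mirrored: dword index 0 maps to
 * offset 1, word index 0 to offset 3, and so on.  In the setters below,
 * io_gpr indices 0-31 refer to the VSRs that overlay the FPRs and indices
 * 32-63 to the VSRs that overlay the Altivec VRs, which is why "index >= 32"
 * switches to VCPU_VSX_VR().
 */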
9266f63e81bSBin Lu static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
9276f63e81bSBin Lu 	u64 gpr)
9286f63e81bSBin Lu {
9296f63e81bSBin Lu 	union kvmppc_one_reg val;
9306f63e81bSBin Lu 	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
9316f63e81bSBin Lu 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
9326f63e81bSBin Lu 
9336f63e81bSBin Lu 	if (offset == -1)
9346f63e81bSBin Lu 		return;
9356f63e81bSBin Lu 
9364eeb8556SSimon Guo 	if (index >= 32) {
9374eeb8556SSimon Guo 		val.vval = VCPU_VSX_VR(vcpu, index - 32);
9386f63e81bSBin Lu 		val.vsxval[offset] = gpr;
9394eeb8556SSimon Guo 		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
9406f63e81bSBin Lu 	} else {
9416f63e81bSBin Lu 		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
9426f63e81bSBin Lu 	}
9436f63e81bSBin Lu }
9446f63e81bSBin Lu 
9456f63e81bSBin Lu static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
9466f63e81bSBin Lu 	u64 gpr)
9476f63e81bSBin Lu {
9486f63e81bSBin Lu 	union kvmppc_one_reg val;
9496f63e81bSBin Lu 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
9506f63e81bSBin Lu 
9514eeb8556SSimon Guo 	if (index >= 32) {
9524eeb8556SSimon Guo 		val.vval = VCPU_VSX_VR(vcpu, index - 32);
9536f63e81bSBin Lu 		val.vsxval[0] = gpr;
9546f63e81bSBin Lu 		val.vsxval[1] = gpr;
9554eeb8556SSimon Guo 		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
9566f63e81bSBin Lu 	} else {
9576f63e81bSBin Lu 		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
9586f63e81bSBin Lu 		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
9596f63e81bSBin Lu 	}
9606f63e81bSBin Lu }
9616f63e81bSBin Lu 
96294dd7fa1SSimon Guo static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
96394dd7fa1SSimon Guo 	u32 gpr)
96494dd7fa1SSimon Guo {
96594dd7fa1SSimon Guo 	union kvmppc_one_reg val;
96694dd7fa1SSimon Guo 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
96794dd7fa1SSimon Guo 
9684eeb8556SSimon Guo 	if (index >= 32) {
96994dd7fa1SSimon Guo 		val.vsx32val[0] = gpr;
97094dd7fa1SSimon Guo 		val.vsx32val[1] = gpr;
97194dd7fa1SSimon Guo 		val.vsx32val[2] = gpr;
97294dd7fa1SSimon Guo 		val.vsx32val[3] = gpr;
9734eeb8556SSimon Guo 		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
97494dd7fa1SSimon Guo 	} else {
97594dd7fa1SSimon Guo 		val.vsx32val[0] = gpr;
97694dd7fa1SSimon Guo 		val.vsx32val[1] = gpr;
97794dd7fa1SSimon Guo 		VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
97894dd7fa1SSimon Guo 		VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
97994dd7fa1SSimon Guo 	}
98094dd7fa1SSimon Guo }
98194dd7fa1SSimon Guo 
9826f63e81bSBin Lu static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
9836f63e81bSBin Lu 	u32 gpr32)
9846f63e81bSBin Lu {
9856f63e81bSBin Lu 	union kvmppc_one_reg val;
9866f63e81bSBin Lu 	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
9876f63e81bSBin Lu 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
9886f63e81bSBin Lu 	int dword_offset, word_offset;
9896f63e81bSBin Lu 
9906f63e81bSBin Lu 	if (offset == -1)
9916f63e81bSBin Lu 		return;
9926f63e81bSBin Lu 
9934eeb8556SSimon Guo 	if (index >= 32) {
9944eeb8556SSimon Guo 		val.vval = VCPU_VSX_VR(vcpu, index - 32);
9956f63e81bSBin Lu 		val.vsx32val[offset] = gpr32;
9964eeb8556SSimon Guo 		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
9976f63e81bSBin Lu 	} else {
9986f63e81bSBin Lu 		dword_offset = offset / 2;
9996f63e81bSBin Lu 		word_offset = offset % 2;
10006f63e81bSBin Lu 		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
10016f63e81bSBin Lu 		val.vsx32val[word_offset] = gpr32;
10026f63e81bSBin Lu 		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
10036f63e81bSBin Lu 	}
10046f63e81bSBin Lu }
10056f63e81bSBin Lu #endif /* CONFIG_VSX */
10066f63e81bSBin Lu 
100709f98496SJose Ricardo Ziviani #ifdef CONFIG_ALTIVEC
1008acc9eb93SSimon Guo static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
1009acc9eb93SSimon Guo 		int index, int element_size)
1010acc9eb93SSimon Guo {
1011acc9eb93SSimon Guo 	int offset;
1012acc9eb93SSimon Guo 	int elts = sizeof(vector128)/element_size;
1013acc9eb93SSimon Guo 
1014acc9eb93SSimon Guo 	if ((index < 0) || (index >= elts))
1015acc9eb93SSimon Guo 		return -1;
1016acc9eb93SSimon Guo 
1017acc9eb93SSimon Guo 	if (kvmppc_need_byteswap(vcpu))
1018acc9eb93SSimon Guo 		offset = elts - index - 1;
1019acc9eb93SSimon Guo 	else
1020acc9eb93SSimon Guo 		offset = index;
1021acc9eb93SSimon Guo 
1022acc9eb93SSimon Guo 	return offset;
1023acc9eb93SSimon Guo }
1024acc9eb93SSimon Guo 
1025acc9eb93SSimon Guo static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
1026acc9eb93SSimon Guo 		int index)
1027acc9eb93SSimon Guo {
1028acc9eb93SSimon Guo 	return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
1029acc9eb93SSimon Guo }
1030acc9eb93SSimon Guo 
1031acc9eb93SSimon Guo static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
1032acc9eb93SSimon Guo 		int index)
1033acc9eb93SSimon Guo {
1034acc9eb93SSimon Guo 	return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
1035acc9eb93SSimon Guo }
1036acc9eb93SSimon Guo 
1037acc9eb93SSimon Guo static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
1038acc9eb93SSimon Guo 		int index)
1039acc9eb93SSimon Guo {
1040acc9eb93SSimon Guo 	return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
1041acc9eb93SSimon Guo }
1042acc9eb93SSimon Guo 
1043acc9eb93SSimon Guo static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
1044acc9eb93SSimon Guo 		int index)
1045acc9eb93SSimon Guo {
1046acc9eb93SSimon Guo 	return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
1047acc9eb93SSimon Guo }
1048acc9eb93SSimon Guo 
1049acc9eb93SSimon Guo 
105009f98496SJose Ricardo Ziviani static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
105109f98496SJose Ricardo Ziviani 	u64 gpr)
105209f98496SJose Ricardo Ziviani {
1053acc9eb93SSimon Guo 	union kvmppc_one_reg val;
1054acc9eb93SSimon Guo 	int offset = kvmppc_get_vmx_dword_offset(vcpu,
1055acc9eb93SSimon Guo 			vcpu->arch.mmio_vmx_offset);
105609f98496SJose Ricardo Ziviani 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
105709f98496SJose Ricardo Ziviani 
1058acc9eb93SSimon Guo 	if (offset == -1)
105909f98496SJose Ricardo Ziviani 		return;
106009f98496SJose Ricardo Ziviani 
1061acc9eb93SSimon Guo 	val.vval = VCPU_VSX_VR(vcpu, index);
1062acc9eb93SSimon Guo 	val.vsxval[offset] = gpr;
1063acc9eb93SSimon Guo 	VCPU_VSX_VR(vcpu, index) = val.vval;
1064acc9eb93SSimon Guo }
106509f98496SJose Ricardo Ziviani 
1066acc9eb93SSimon Guo static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
1067acc9eb93SSimon Guo 	u32 gpr32)
1068acc9eb93SSimon Guo {
1069acc9eb93SSimon Guo 	union kvmppc_one_reg val;
1070acc9eb93SSimon Guo 	int offset = kvmppc_get_vmx_word_offset(vcpu,
1071acc9eb93SSimon Guo 			vcpu->arch.mmio_vmx_offset);
1072acc9eb93SSimon Guo 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1073acc9eb93SSimon Guo 
1074acc9eb93SSimon Guo 	if (offset == -1)
1075acc9eb93SSimon Guo 		return;
1076acc9eb93SSimon Guo 
1077acc9eb93SSimon Guo 	val.vval = VCPU_VSX_VR(vcpu, index);
1078acc9eb93SSimon Guo 	val.vsx32val[offset] = gpr32;
1079acc9eb93SSimon Guo 	VCPU_VSX_VR(vcpu, index) = val.vval;
1080acc9eb93SSimon Guo }
1081acc9eb93SSimon Guo 
1082acc9eb93SSimon Guo static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
1083acc9eb93SSimon Guo 	u16 gpr16)
1084acc9eb93SSimon Guo {
1085acc9eb93SSimon Guo 	union kvmppc_one_reg val;
1086acc9eb93SSimon Guo 	int offset = kvmppc_get_vmx_hword_offset(vcpu,
1087acc9eb93SSimon Guo 			vcpu->arch.mmio_vmx_offset);
1088acc9eb93SSimon Guo 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1089acc9eb93SSimon Guo 
1090acc9eb93SSimon Guo 	if (offset == -1)
1091acc9eb93SSimon Guo 		return;
1092acc9eb93SSimon Guo 
1093acc9eb93SSimon Guo 	val.vval = VCPU_VSX_VR(vcpu, index);
1094acc9eb93SSimon Guo 	val.vsx16val[offset] = gpr16;
1095acc9eb93SSimon Guo 	VCPU_VSX_VR(vcpu, index) = val.vval;
1096acc9eb93SSimon Guo }
1097acc9eb93SSimon Guo 
kvmppc_set_vmx_byte(struct kvm_vcpu * vcpu,u8 gpr8)1098acc9eb93SSimon Guo static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
1099acc9eb93SSimon Guo 	u8 gpr8)
1100acc9eb93SSimon Guo {
1101acc9eb93SSimon Guo 	union kvmppc_one_reg val;
1102acc9eb93SSimon Guo 	int offset = kvmppc_get_vmx_byte_offset(vcpu,
1103acc9eb93SSimon Guo 			vcpu->arch.mmio_vmx_offset);
1104acc9eb93SSimon Guo 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1105acc9eb93SSimon Guo 
1106acc9eb93SSimon Guo 	if (offset == -1)
1107acc9eb93SSimon Guo 		return;
1108acc9eb93SSimon Guo 
1109acc9eb93SSimon Guo 	val.vval = VCPU_VSX_VR(vcpu, index);
1110acc9eb93SSimon Guo 	val.vsx8val[offset] = gpr8;
1111acc9eb93SSimon Guo 	VCPU_VSX_VR(vcpu, index) = val.vval;
111209f98496SJose Ricardo Ziviani }
111309f98496SJose Ricardo Ziviani #endif /* CONFIG_ALTIVEC */
111409f98496SJose Ricardo Ziviani 
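/*
 * Helpers for single/double precision conversion, used below when an
 * emulated FP load or store is narrower than the 64-bit FPR image.
 * The value is bounced through fr0 with lfs/stfd (or lfd/stfs) so the
 * FPU performs the format conversion; preemption is disabled and
 * kernel FP state is enabled around the asm since fr0 is clobbered.
 * Without CONFIG_PPC_FPU the helpers collapse to identity macros.
 */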
11156f63e81bSBin Lu #ifdef CONFIG_PPC_FPU
sp_to_dp(u32 fprs)11166f63e81bSBin Lu static inline u64 sp_to_dp(u32 fprs)
11176f63e81bSBin Lu {
11186f63e81bSBin Lu 	u64 fprd;
11196f63e81bSBin Lu 
11206f63e81bSBin Lu 	preempt_disable();
11216f63e81bSBin Lu 	enable_kernel_fp();
11222a24d80fSNick Desaulniers 	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m<>" (fprd) : "m<>" (fprs)
11236f63e81bSBin Lu 	     : "fr0");
11246f63e81bSBin Lu 	preempt_enable();
11256f63e81bSBin Lu 	return fprd;
11266f63e81bSBin Lu }
11276f63e81bSBin Lu 
dp_to_sp(u64 fprd)11286f63e81bSBin Lu static inline u32 dp_to_sp(u64 fprd)
11296f63e81bSBin Lu {
11306f63e81bSBin Lu 	u32 fprs;
11316f63e81bSBin Lu 
11326f63e81bSBin Lu 	preempt_disable();
11336f63e81bSBin Lu 	enable_kernel_fp();
11342a24d80fSNick Desaulniers 	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m<>" (fprs) : "m<>" (fprd)
11356f63e81bSBin Lu 	     : "fr0");
11366f63e81bSBin Lu 	preempt_enable();
11376f63e81bSBin Lu 	return fprs;
11386f63e81bSBin Lu }
11396f63e81bSBin Lu 
11406f63e81bSBin Lu #else
11416f63e81bSBin Lu #define sp_to_dp(x)	(x)
11426f63e81bSBin Lu #define dp_to_sp(x)	(x)
11436f63e81bSBin Lu #endif /* CONFIG_PPC_FPU */
11446f63e81bSBin Lu 
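/*
 * Complete an emulated load once the MMIO data is available (either
 * from the in-kernel MMIO bus or from userspace): widen the bytes in
 * run->mmio.data to a u64, byteswapping if host and guest byte order
 * differ, apply any single-to-double or sign extension requested at
 * decode time, and write the result to the destination encoded in
 * vcpu->arch.io_gpr (GPR, FPR, QPR, VSX or VMX register, or nested
 * guest memory).
 */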
kvmppc_complete_mmio_load(struct kvm_vcpu * vcpu)11458c99d345STianjia Zhang static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
1146bbf45ba5SHollis Blanchard {
11478c99d345STianjia Zhang 	struct kvm_run *run = vcpu->run;
11483f649ab7SKees Cook 	u64 gpr;
1149bbf45ba5SHollis Blanchard 
11503f831504SFabiano Rosas 	if (run->mmio.len > sizeof(gpr))
1151bbf45ba5SHollis Blanchard 		return;
1152bbf45ba5SHollis Blanchard 
1153d078eed3SDavid Gibson 	if (!vcpu->arch.mmio_host_swabbed) {
1154bbf45ba5SHollis Blanchard 		switch (run->mmio.len) {
1155b104d066SAlexander Graf 		case 8: gpr = *(u64 *)run->mmio.data; break;
11568e5b26b5SAlexander Graf 		case 4: gpr = *(u32 *)run->mmio.data; break;
11578e5b26b5SAlexander Graf 		case 2: gpr = *(u16 *)run->mmio.data; break;
11588e5b26b5SAlexander Graf 		case 1: gpr = *(u8 *)run->mmio.data; break;
1159bbf45ba5SHollis Blanchard 		}
1160bbf45ba5SHollis Blanchard 	} else {
1161bbf45ba5SHollis Blanchard 		switch (run->mmio.len) {
1162d078eed3SDavid Gibson 		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
1163d078eed3SDavid Gibson 		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
1164d078eed3SDavid Gibson 		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
11658e5b26b5SAlexander Graf 		case 1: gpr = *(u8 *)run->mmio.data; break;
1166bbf45ba5SHollis Blanchard 		}
1167bbf45ba5SHollis Blanchard 	}
11688e5b26b5SAlexander Graf 
11696f63e81bSBin Lu 	/* conversion between single and double precision */
11706f63e81bSBin Lu 	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
11716f63e81bSBin Lu 		gpr = sp_to_dp(gpr);
11726f63e81bSBin Lu 
11733587d534SAlexander Graf 	if (vcpu->arch.mmio_sign_extend) {
11743587d534SAlexander Graf 		switch (run->mmio.len) {
11753587d534SAlexander Graf #ifdef CONFIG_PPC64
11763587d534SAlexander Graf 		case 4:
11773587d534SAlexander Graf 			gpr = (s64)(s32)gpr;
11783587d534SAlexander Graf 			break;
11793587d534SAlexander Graf #endif
11803587d534SAlexander Graf 		case 2:
11813587d534SAlexander Graf 			gpr = (s64)(s16)gpr;
11823587d534SAlexander Graf 			break;
11833587d534SAlexander Graf 		case 1:
11843587d534SAlexander Graf 			gpr = (s64)(s8)gpr;
11853587d534SAlexander Graf 			break;
11863587d534SAlexander Graf 		}
11873587d534SAlexander Graf 	}
11883587d534SAlexander Graf 
1189b3c5d3c2SAlexander Graf 	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
1190b3c5d3c2SAlexander Graf 	case KVM_MMIO_REG_GPR:
1191b104d066SAlexander Graf 		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
1192b104d066SAlexander Graf 		break;
1193b3c5d3c2SAlexander Graf 	case KVM_MMIO_REG_FPR:
11942e6baa46SSimon Guo 		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
11952e6baa46SSimon Guo 			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
11962e6baa46SSimon Guo 
1197efff1912SPaul Mackerras 		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1198b104d066SAlexander Graf 		break;
1199287d5611SAlexander Graf #ifdef CONFIG_PPC_BOOK3S
1200b3c5d3c2SAlexander Graf 	case KVM_MMIO_REG_QPR:
1201b3c5d3c2SAlexander Graf 		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1202b104d066SAlexander Graf 		break;
1203b3c5d3c2SAlexander Graf 	case KVM_MMIO_REG_FQPR:
1204efff1912SPaul Mackerras 		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1205b3c5d3c2SAlexander Graf 		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1206b104d066SAlexander Graf 		break;
1207287d5611SAlexander Graf #endif
12086f63e81bSBin Lu #ifdef CONFIG_VSX
12096f63e81bSBin Lu 	case KVM_MMIO_REG_VSX:
12102e6baa46SSimon Guo 		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
12112e6baa46SSimon Guo 			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);
12122e6baa46SSimon Guo 
1213da2a32b8SSimon Guo 		if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
12146f63e81bSBin Lu 			kvmppc_set_vsr_dword(vcpu, gpr);
1215da2a32b8SSimon Guo 		else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
12166f63e81bSBin Lu 			kvmppc_set_vsr_word(vcpu, gpr);
1217da2a32b8SSimon Guo 		else if (vcpu->arch.mmio_copy_type ==
12186f63e81bSBin Lu 				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
12196f63e81bSBin Lu 			kvmppc_set_vsr_dword_dump(vcpu, gpr);
1220da2a32b8SSimon Guo 		else if (vcpu->arch.mmio_copy_type ==
122194dd7fa1SSimon Guo 				KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
122294dd7fa1SSimon Guo 			kvmppc_set_vsr_word_dump(vcpu, gpr);
12236f63e81bSBin Lu 		break;
12246f63e81bSBin Lu #endif
122509f98496SJose Ricardo Ziviani #ifdef CONFIG_ALTIVEC
122609f98496SJose Ricardo Ziviani 	case KVM_MMIO_REG_VMX:
12272e6baa46SSimon Guo 		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
12282e6baa46SSimon Guo 			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);
12292e6baa46SSimon Guo 
1230acc9eb93SSimon Guo 		if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
123109f98496SJose Ricardo Ziviani 			kvmppc_set_vmx_dword(vcpu, gpr);
1232acc9eb93SSimon Guo 		else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
1233acc9eb93SSimon Guo 			kvmppc_set_vmx_word(vcpu, gpr);
1234acc9eb93SSimon Guo 		else if (vcpu->arch.mmio_copy_type ==
1235acc9eb93SSimon Guo 				KVMPPC_VMX_COPY_HWORD)
1236acc9eb93SSimon Guo 			kvmppc_set_vmx_hword(vcpu, gpr);
1237acc9eb93SSimon Guo 		else if (vcpu->arch.mmio_copy_type ==
1238acc9eb93SSimon Guo 				KVMPPC_VMX_COPY_BYTE)
1239acc9eb93SSimon Guo 			kvmppc_set_vmx_byte(vcpu, gpr);
124009f98496SJose Ricardo Ziviani 		break;
124109f98496SJose Ricardo Ziviani #endif
1242873db2cdSSuraj Jitindar Singh #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1243873db2cdSSuraj Jitindar Singh 	case KVM_MMIO_REG_NESTED_GPR:
1244873db2cdSSuraj Jitindar Singh 		if (kvmppc_need_byteswap(vcpu))
1245873db2cdSSuraj Jitindar Singh 			gpr = swab64(gpr);
1246873db2cdSSuraj Jitindar Singh 		kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
1247873db2cdSSuraj Jitindar Singh 				     sizeof(gpr));
1248873db2cdSSuraj Jitindar Singh 		break;
1249873db2cdSSuraj Jitindar Singh #endif
1250b104d066SAlexander Graf 	default:
1251b104d066SAlexander Graf 		BUG();
1252b104d066SAlexander Graf 	}
1253bbf45ba5SHollis Blanchard }
1254bbf45ba5SHollis Blanchard 
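/*
 * Common load path: the access is first offered to the in-kernel MMIO
 * bus; if a device handles it there, the load is completed immediately
 * and EMULATE_DONE is returned.  Otherwise EMULATE_DO_MMIO is returned
 * and the access is forwarded to userspace, with the result folded
 * back in by kvmppc_complete_mmio_load() on the next KVM_RUN.
 */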
__kvmppc_handle_load(struct kvm_vcpu * vcpu,unsigned int rt,unsigned int bytes,int is_default_endian,int sign_extend)12558c99d345STianjia Zhang static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
125673601775SCédric Le Goater 				unsigned int rt, unsigned int bytes,
1257eb8b0560SPaul Mackerras 				int is_default_endian, int sign_extend)
1258bbf45ba5SHollis Blanchard {
12598c99d345STianjia Zhang 	struct kvm_run *run = vcpu->run;
1260ed840ee9SScott Wood 	int idx, ret;
1261d078eed3SDavid Gibson 	bool host_swabbed;
126273601775SCédric Le Goater 
1263d078eed3SDavid Gibson 	/* Pity C doesn't have a logical XOR operator */
126473601775SCédric Le Goater 	if (kvmppc_need_byteswap(vcpu)) {
1265d078eed3SDavid Gibson 		host_swabbed = is_default_endian;
126673601775SCédric Le Goater 	} else {
1267d078eed3SDavid Gibson 		host_swabbed = !is_default_endian;
126873601775SCédric Le Goater 	}
1269ed840ee9SScott Wood 
12703f831504SFabiano Rosas 	if (bytes > sizeof(run->mmio.data))
12713f831504SFabiano Rosas 		return EMULATE_FAIL;
1272bbf45ba5SHollis Blanchard 
1273bbf45ba5SHollis Blanchard 	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1274bbf45ba5SHollis Blanchard 	run->mmio.len = bytes;
1275bbf45ba5SHollis Blanchard 	run->mmio.is_write = 0;
1276bbf45ba5SHollis Blanchard 
1277bbf45ba5SHollis Blanchard 	vcpu->arch.io_gpr = rt;
1278d078eed3SDavid Gibson 	vcpu->arch.mmio_host_swabbed = host_swabbed;
1279bbf45ba5SHollis Blanchard 	vcpu->mmio_needed = 1;
1280bbf45ba5SHollis Blanchard 	vcpu->mmio_is_write = 0;
1281eb8b0560SPaul Mackerras 	vcpu->arch.mmio_sign_extend = sign_extend;
1282bbf45ba5SHollis Blanchard 
1283ed840ee9SScott Wood 	idx = srcu_read_lock(&vcpu->kvm->srcu);
1284ed840ee9SScott Wood 
1285e32edf4fSNikolay Nikolaev 	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1286ed840ee9SScott Wood 			      bytes, &run->mmio.data);
1287ed840ee9SScott Wood 
1288ed840ee9SScott Wood 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
1289ed840ee9SScott Wood 
1290ed840ee9SScott Wood 	if (!ret) {
12918c99d345STianjia Zhang 		kvmppc_complete_mmio_load(vcpu);
12920e673fb6SAlexander Graf 		vcpu->mmio_needed = 0;
12930e673fb6SAlexander Graf 		return EMULATE_DONE;
12940e673fb6SAlexander Graf 	}
12950e673fb6SAlexander Graf 
1296bbf45ba5SHollis Blanchard 	return EMULATE_DO_MMIO;
1297bbf45ba5SHollis Blanchard }
1298eb8b0560SPaul Mackerras 
kvmppc_handle_load(struct kvm_vcpu * vcpu,unsigned int rt,unsigned int bytes,int is_default_endian)12998c99d345STianjia Zhang int kvmppc_handle_load(struct kvm_vcpu *vcpu,
1300eb8b0560SPaul Mackerras 		       unsigned int rt, unsigned int bytes,
1301eb8b0560SPaul Mackerras 		       int is_default_endian)
1302eb8b0560SPaul Mackerras {
13038c99d345STianjia Zhang 	return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
1304eb8b0560SPaul Mackerras }
13052ba9f0d8SAneesh Kumar K.V EXPORT_SYMBOL_GPL(kvmppc_handle_load);
1306bbf45ba5SHollis Blanchard 
13073587d534SAlexander Graf /* Same as above, but sign extends */
kvmppc_handle_loads(struct kvm_vcpu * vcpu,unsigned int rt,unsigned int bytes,int is_default_endian)13088c99d345STianjia Zhang int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
130973601775SCédric Le Goater 			unsigned int rt, unsigned int bytes,
131073601775SCédric Le Goater 			int is_default_endian)
13113587d534SAlexander Graf {
13128c99d345STianjia Zhang 	return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
13133587d534SAlexander Graf }
13143587d534SAlexander Graf 
13156f63e81bSBin Lu #ifdef CONFIG_VSX
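/*
 * VSX accesses that touch several elements are emulated as
 * mmio_vsx_copy_nums consecutive MMIO transactions of 'bytes' each.
 * paddr_accessed and mmio_vsx_offset advance after every completed
 * element, so the loop can be re-entered after each trip out to
 * userspace (see kvmppc_emulate_mmio_vsx_loadstore() below).
 */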
kvmppc_handle_vsx_load(struct kvm_vcpu * vcpu,unsigned int rt,unsigned int bytes,int is_default_endian,int mmio_sign_extend)13168c99d345STianjia Zhang int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
13176f63e81bSBin Lu 			unsigned int rt, unsigned int bytes,
13186f63e81bSBin Lu 			int is_default_endian, int mmio_sign_extend)
13196f63e81bSBin Lu {
13206f63e81bSBin Lu 	enum emulation_result emulated = EMULATE_DONE;
13216f63e81bSBin Lu 
13229aa6825bSPaul Mackerras 	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
13239aa6825bSPaul Mackerras 	if (vcpu->arch.mmio_vsx_copy_nums > 4)
13246f63e81bSBin Lu 		return EMULATE_FAIL;
13256f63e81bSBin Lu 
13266f63e81bSBin Lu 	while (vcpu->arch.mmio_vsx_copy_nums) {
13278c99d345STianjia Zhang 		emulated = __kvmppc_handle_load(vcpu, rt, bytes,
13286f63e81bSBin Lu 			is_default_endian, mmio_sign_extend);
13296f63e81bSBin Lu 
13306f63e81bSBin Lu 		if (emulated != EMULATE_DONE)
13316f63e81bSBin Lu 			break;
13326f63e81bSBin Lu 
13338c99d345STianjia Zhang 		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
13346f63e81bSBin Lu 
13356f63e81bSBin Lu 		vcpu->arch.mmio_vsx_copy_nums--;
13366f63e81bSBin Lu 		vcpu->arch.mmio_vsx_offset++;
13376f63e81bSBin Lu 	}
13386f63e81bSBin Lu 	return emulated;
13396f63e81bSBin Lu }
13406f63e81bSBin Lu #endif /* CONFIG_VSX */
13416f63e81bSBin Lu 
kvmppc_handle_store(struct kvm_vcpu * vcpu,u64 val,unsigned int bytes,int is_default_endian)13428c99d345STianjia Zhang int kvmppc_handle_store(struct kvm_vcpu *vcpu,
134373601775SCédric Le Goater 			u64 val, unsigned int bytes, int is_default_endian)
1344bbf45ba5SHollis Blanchard {
13458c99d345STianjia Zhang 	struct kvm_run *run = vcpu->run;
1346bbf45ba5SHollis Blanchard 	void *data = run->mmio.data;
1347ed840ee9SScott Wood 	int idx, ret;
1348d078eed3SDavid Gibson 	bool host_swabbed;
134973601775SCédric Le Goater 
1350d078eed3SDavid Gibson 	/* Pity C doesn't have a logical XOR operator */
135173601775SCédric Le Goater 	if (kvmppc_need_byteswap(vcpu)) {
1352d078eed3SDavid Gibson 		host_swabbed = is_default_endian;
135373601775SCédric Le Goater 	} else {
1354d078eed3SDavid Gibson 		host_swabbed = !is_default_endian;
135573601775SCédric Le Goater 	}
1356bbf45ba5SHollis Blanchard 
13573f831504SFabiano Rosas 	if (bytes > sizeof(run->mmio.data))
13583f831504SFabiano Rosas 		return EMULATE_FAIL;
1359bbf45ba5SHollis Blanchard 
1360bbf45ba5SHollis Blanchard 	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1361bbf45ba5SHollis Blanchard 	run->mmio.len = bytes;
1362bbf45ba5SHollis Blanchard 	run->mmio.is_write = 1;
1363bbf45ba5SHollis Blanchard 	vcpu->mmio_needed = 1;
1364bbf45ba5SHollis Blanchard 	vcpu->mmio_is_write = 1;
1365bbf45ba5SHollis Blanchard 
13666f63e81bSBin Lu 	if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
13676f63e81bSBin Lu 		val = dp_to_sp(val);
13686f63e81bSBin Lu 
1369bbf45ba5SHollis Blanchard 	/* Store the value in the lowest bytes of 'data'. */
1370d078eed3SDavid Gibson 	if (!host_swabbed) {
1371bbf45ba5SHollis Blanchard 		switch (bytes) {
1372b104d066SAlexander Graf 		case 8: *(u64 *)data = val; break;
1373bbf45ba5SHollis Blanchard 		case 4: *(u32 *)data = val; break;
1374bbf45ba5SHollis Blanchard 		case 2: *(u16 *)data = val; break;
1375bbf45ba5SHollis Blanchard 		case 1: *(u8  *)data = val; break;
1376bbf45ba5SHollis Blanchard 		}
1377bbf45ba5SHollis Blanchard 	} else {
1378bbf45ba5SHollis Blanchard 		switch (bytes) {
1379d078eed3SDavid Gibson 		case 8: *(u64 *)data = swab64(val); break;
1380d078eed3SDavid Gibson 		case 4: *(u32 *)data = swab32(val); break;
1381d078eed3SDavid Gibson 		case 2: *(u16 *)data = swab16(val); break;
1382bbf45ba5SHollis Blanchard 		case 1: *(u8  *)data = val; break;
1383bbf45ba5SHollis Blanchard 		}
1384bbf45ba5SHollis Blanchard 	}
1385bbf45ba5SHollis Blanchard 
1386ed840ee9SScott Wood 	idx = srcu_read_lock(&vcpu->kvm->srcu);
1387ed840ee9SScott Wood 
1388e32edf4fSNikolay Nikolaev 	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1389ed840ee9SScott Wood 			       bytes, &run->mmio.data);
1390ed840ee9SScott Wood 
1391ed840ee9SScott Wood 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
1392ed840ee9SScott Wood 
1393ed840ee9SScott Wood 	if (!ret) {
13940e673fb6SAlexander Graf 		vcpu->mmio_needed = 0;
13950e673fb6SAlexander Graf 		return EMULATE_DONE;
13960e673fb6SAlexander Graf 	}
13970e673fb6SAlexander Graf 
1398bbf45ba5SHollis Blanchard 	return EMULATE_DO_MMIO;
1399bbf45ba5SHollis Blanchard }
14002ba9f0d8SAneesh Kumar K.V EXPORT_SYMBOL_GPL(kvmppc_handle_store);
1401bbf45ba5SHollis Blanchard 
14026f63e81bSBin Lu #ifdef CONFIG_VSX
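/*
 * Pick the source element for a VSX store: depending on
 * mmio_copy_type, the doubleword or word selected by mmio_vsx_offset
 * is taken either from an FPR (rs < 32) or from the Altivec half of
 * the VSX register file (rs >= 32).  Returns -1 if the offset or the
 * copy type is invalid.
 */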
kvmppc_get_vsr_data(struct kvm_vcpu * vcpu,int rs,u64 * val)14036f63e81bSBin Lu static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
14046f63e81bSBin Lu {
14056f63e81bSBin Lu 	u32 dword_offset, word_offset;
14066f63e81bSBin Lu 	union kvmppc_one_reg reg;
14076f63e81bSBin Lu 	int vsx_offset = 0;
1408da2a32b8SSimon Guo 	int copy_type = vcpu->arch.mmio_copy_type;
14096f63e81bSBin Lu 	int result = 0;
14106f63e81bSBin Lu 
14116f63e81bSBin Lu 	switch (copy_type) {
14126f63e81bSBin Lu 	case KVMPPC_VSX_COPY_DWORD:
14136f63e81bSBin Lu 		vsx_offset =
14146f63e81bSBin Lu 			kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
14156f63e81bSBin Lu 
14166f63e81bSBin Lu 		if (vsx_offset == -1) {
14176f63e81bSBin Lu 			result = -1;
14186f63e81bSBin Lu 			break;
14196f63e81bSBin Lu 		}
14206f63e81bSBin Lu 
14214eeb8556SSimon Guo 		if (rs < 32) {
14226f63e81bSBin Lu 			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
14236f63e81bSBin Lu 		} else {
14244eeb8556SSimon Guo 			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
14256f63e81bSBin Lu 			*val = reg.vsxval[vsx_offset];
14266f63e81bSBin Lu 		}
14276f63e81bSBin Lu 		break;
14286f63e81bSBin Lu 
14296f63e81bSBin Lu 	case KVMPPC_VSX_COPY_WORD:
14306f63e81bSBin Lu 		vsx_offset =
14316f63e81bSBin Lu 			kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
14326f63e81bSBin Lu 
14336f63e81bSBin Lu 		if (vsx_offset == -1) {
14346f63e81bSBin Lu 			result = -1;
14356f63e81bSBin Lu 			break;
14366f63e81bSBin Lu 		}
14376f63e81bSBin Lu 
14384eeb8556SSimon Guo 		if (rs < 32) {
14396f63e81bSBin Lu 			dword_offset = vsx_offset / 2;
14406f63e81bSBin Lu 			word_offset = vsx_offset % 2;
14416f63e81bSBin Lu 			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
14426f63e81bSBin Lu 			*val = reg.vsx32val[word_offset];
14436f63e81bSBin Lu 		} else {
14444eeb8556SSimon Guo 			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
14456f63e81bSBin Lu 			*val = reg.vsx32val[vsx_offset];
14466f63e81bSBin Lu 		}
14476f63e81bSBin Lu 		break;
14486f63e81bSBin Lu 
14496f63e81bSBin Lu 	default:
14506f63e81bSBin Lu 		result = -1;
14516f63e81bSBin Lu 		break;
14526f63e81bSBin Lu 	}
14536f63e81bSBin Lu 
14546f63e81bSBin Lu 	return result;
14556f63e81bSBin Lu }
14566f63e81bSBin Lu 
kvmppc_handle_vsx_store(struct kvm_vcpu * vcpu,int rs,unsigned int bytes,int is_default_endian)14578c99d345STianjia Zhang int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
14586f63e81bSBin Lu 			int rs, unsigned int bytes, int is_default_endian)
14596f63e81bSBin Lu {
14606f63e81bSBin Lu 	u64 val;
14616f63e81bSBin Lu 	enum emulation_result emulated = EMULATE_DONE;
14626f63e81bSBin Lu 
14636f63e81bSBin Lu 	vcpu->arch.io_gpr = rs;
14646f63e81bSBin Lu 
14659aa6825bSPaul Mackerras 	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
14669aa6825bSPaul Mackerras 	if (vcpu->arch.mmio_vsx_copy_nums > 4)
14676f63e81bSBin Lu 		return EMULATE_FAIL;
14686f63e81bSBin Lu 
14696f63e81bSBin Lu 	while (vcpu->arch.mmio_vsx_copy_nums) {
14706f63e81bSBin Lu 		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
14716f63e81bSBin Lu 			return EMULATE_FAIL;
14726f63e81bSBin Lu 
14738c99d345STianjia Zhang 		emulated = kvmppc_handle_store(vcpu,
14746f63e81bSBin Lu 			 val, bytes, is_default_endian);
14756f63e81bSBin Lu 
14766f63e81bSBin Lu 		if (emulated != EMULATE_DONE)
14776f63e81bSBin Lu 			break;
14786f63e81bSBin Lu 
14798c99d345STianjia Zhang 		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
14806f63e81bSBin Lu 
14816f63e81bSBin Lu 		vcpu->arch.mmio_vsx_copy_nums--;
14826f63e81bSBin Lu 		vcpu->arch.mmio_vsx_offset++;
14836f63e81bSBin Lu 	}
14846f63e81bSBin Lu 
14856f63e81bSBin Lu 	return emulated;
14866f63e81bSBin Lu }
14876f63e81bSBin Lu 
kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu * vcpu)14888c99d345STianjia Zhang static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
14896f63e81bSBin Lu {
14908c99d345STianjia Zhang 	struct kvm_run *run = vcpu->run;
14916f63e81bSBin Lu 	enum emulation_result emulated = EMULATE_FAIL;
14926f63e81bSBin Lu 	int r;
14936f63e81bSBin Lu 
14946f63e81bSBin Lu 	vcpu->arch.paddr_accessed += run->mmio.len;
14956f63e81bSBin Lu 
14966f63e81bSBin Lu 	if (!vcpu->mmio_is_write) {
14978c99d345STianjia Zhang 		emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
14986f63e81bSBin Lu 			 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
14996f63e81bSBin Lu 	} else {
15008c99d345STianjia Zhang 		emulated = kvmppc_handle_vsx_store(vcpu,
15016f63e81bSBin Lu 			 vcpu->arch.io_gpr, run->mmio.len, 1);
15026f63e81bSBin Lu 	}
15036f63e81bSBin Lu 
15046f63e81bSBin Lu 	switch (emulated) {
15056f63e81bSBin Lu 	case EMULATE_DO_MMIO:
15066f63e81bSBin Lu 		run->exit_reason = KVM_EXIT_MMIO;
15076f63e81bSBin Lu 		r = RESUME_HOST;
15086f63e81bSBin Lu 		break;
15096f63e81bSBin Lu 	case EMULATE_FAIL:
15106f63e81bSBin Lu 		pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
15116f63e81bSBin Lu 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
15126f63e81bSBin Lu 		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
15136f63e81bSBin Lu 		r = RESUME_HOST;
15146f63e81bSBin Lu 		break;
15156f63e81bSBin Lu 	default:
15166f63e81bSBin Lu 		r = RESUME_GUEST;
15176f63e81bSBin Lu 		break;
15186f63e81bSBin Lu 	}
15196f63e81bSBin Lu 	return r;
15206f63e81bSBin Lu }
15216f63e81bSBin Lu #endif /* CONFIG_VSX */
15226f63e81bSBin Lu 
152309f98496SJose Ricardo Ziviani #ifdef CONFIG_ALTIVEC
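/*
 * The VMX (Altivec) MMIO path mirrors the VSX one above: an element
 * access is split into mmio_vmx_copy_nums MMIO transactions, with
 * mmio_copy_type selecting whether a doubleword, word, halfword or
 * byte of the vector register is transferred each time.
 */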
kvmppc_handle_vmx_load(struct kvm_vcpu * vcpu,unsigned int rt,unsigned int bytes,int is_default_endian)15248c99d345STianjia Zhang int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
1525acc9eb93SSimon Guo 		unsigned int rt, unsigned int bytes, int is_default_endian)
152609f98496SJose Ricardo Ziviani {
15276df3877fSPaul Mackerras 	enum emulation_result emulated = EMULATE_DONE;
152809f98496SJose Ricardo Ziviani 
1529b99234b9SFabiano Rosas 	if (vcpu->arch.mmio_vmx_copy_nums > 2)
1530acc9eb93SSimon Guo 		return EMULATE_FAIL;
1531acc9eb93SSimon Guo 
153209f98496SJose Ricardo Ziviani 	while (vcpu->arch.mmio_vmx_copy_nums) {
15338c99d345STianjia Zhang 		emulated = __kvmppc_handle_load(vcpu, rt, bytes,
153409f98496SJose Ricardo Ziviani 				is_default_endian, 0);
153509f98496SJose Ricardo Ziviani 
153609f98496SJose Ricardo Ziviani 		if (emulated != EMULATE_DONE)
153709f98496SJose Ricardo Ziviani 			break;
153809f98496SJose Ricardo Ziviani 
15398c99d345STianjia Zhang 		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
154009f98496SJose Ricardo Ziviani 		vcpu->arch.mmio_vmx_copy_nums--;
1541acc9eb93SSimon Guo 		vcpu->arch.mmio_vmx_offset++;
154209f98496SJose Ricardo Ziviani 	}
154309f98496SJose Ricardo Ziviani 
154409f98496SJose Ricardo Ziviani 	return emulated;
154509f98496SJose Ricardo Ziviani }
154609f98496SJose Ricardo Ziviani 
kvmppc_get_vmx_dword(struct kvm_vcpu * vcpu,int index,u64 * val)15479236f57aSCédric Le Goater static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
154809f98496SJose Ricardo Ziviani {
1549acc9eb93SSimon Guo 	union kvmppc_one_reg reg;
1550acc9eb93SSimon Guo 	int vmx_offset = 0;
1551acc9eb93SSimon Guo 	int result = 0;
155209f98496SJose Ricardo Ziviani 
1553acc9eb93SSimon Guo 	vmx_offset =
1554acc9eb93SSimon Guo 		kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1555acc9eb93SSimon Guo 
1556acc9eb93SSimon Guo 	if (vmx_offset == -1)
155709f98496SJose Ricardo Ziviani 		return -1;
155809f98496SJose Ricardo Ziviani 
1559acc9eb93SSimon Guo 	reg.vval = VCPU_VSX_VR(vcpu, index);
1560acc9eb93SSimon Guo 	*val = reg.vsxval[vmx_offset];
156109f98496SJose Ricardo Ziviani 
1562acc9eb93SSimon Guo 	return result;
156309f98496SJose Ricardo Ziviani }
156409f98496SJose Ricardo Ziviani 
kvmppc_get_vmx_word(struct kvm_vcpu * vcpu,int index,u64 * val)15659236f57aSCédric Le Goater static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
1566acc9eb93SSimon Guo {
1567acc9eb93SSimon Guo 	union kvmppc_one_reg reg;
1568acc9eb93SSimon Guo 	int vmx_offset = 0;
1569acc9eb93SSimon Guo 	int result = 0;
1570acc9eb93SSimon Guo 
1571acc9eb93SSimon Guo 	vmx_offset =
1572acc9eb93SSimon Guo 		kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1573acc9eb93SSimon Guo 
1574acc9eb93SSimon Guo 	if (vmx_offset == -1)
1575acc9eb93SSimon Guo 		return -1;
1576acc9eb93SSimon Guo 
1577acc9eb93SSimon Guo 	reg.vval = VCPU_VSX_VR(vcpu, index);
1578acc9eb93SSimon Guo 	*val = reg.vsx32val[vmx_offset];
1579acc9eb93SSimon Guo 
1580acc9eb93SSimon Guo 	return result;
1581acc9eb93SSimon Guo }
1582acc9eb93SSimon Guo 
kvmppc_get_vmx_hword(struct kvm_vcpu * vcpu,int index,u64 * val)15839236f57aSCédric Le Goater static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
1584acc9eb93SSimon Guo {
1585acc9eb93SSimon Guo 	union kvmppc_one_reg reg;
1586acc9eb93SSimon Guo 	int vmx_offset = 0;
1587acc9eb93SSimon Guo 	int result = 0;
1588acc9eb93SSimon Guo 
1589acc9eb93SSimon Guo 	vmx_offset =
1590acc9eb93SSimon Guo 		kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1591acc9eb93SSimon Guo 
1592acc9eb93SSimon Guo 	if (vmx_offset == -1)
1593acc9eb93SSimon Guo 		return -1;
1594acc9eb93SSimon Guo 
1595acc9eb93SSimon Guo 	reg.vval = VCPU_VSX_VR(vcpu, index);
1596acc9eb93SSimon Guo 	*val = reg.vsx16val[vmx_offset];
1597acc9eb93SSimon Guo 
1598acc9eb93SSimon Guo 	return result;
1599acc9eb93SSimon Guo }
1600acc9eb93SSimon Guo 
kvmppc_get_vmx_byte(struct kvm_vcpu * vcpu,int index,u64 * val)16019236f57aSCédric Le Goater static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
1602acc9eb93SSimon Guo {
1603acc9eb93SSimon Guo 	union kvmppc_one_reg reg;
1604acc9eb93SSimon Guo 	int vmx_offset = 0;
1605acc9eb93SSimon Guo 	int result = 0;
1606acc9eb93SSimon Guo 
1607acc9eb93SSimon Guo 	vmx_offset =
1608acc9eb93SSimon Guo 		kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1609acc9eb93SSimon Guo 
1610acc9eb93SSimon Guo 	if (vmx_offset == -1)
1611acc9eb93SSimon Guo 		return -1;
1612acc9eb93SSimon Guo 
1613acc9eb93SSimon Guo 	reg.vval = VCPU_VSX_VR(vcpu, index);
1614acc9eb93SSimon Guo 	*val = reg.vsx8val[vmx_offset];
1615acc9eb93SSimon Guo 
1616acc9eb93SSimon Guo 	return result;
1617acc9eb93SSimon Guo }
1618acc9eb93SSimon Guo 
kvmppc_handle_vmx_store(struct kvm_vcpu * vcpu,unsigned int rs,unsigned int bytes,int is_default_endian)16198c99d345STianjia Zhang int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
1620acc9eb93SSimon Guo 		unsigned int rs, unsigned int bytes, int is_default_endian)
162109f98496SJose Ricardo Ziviani {
162209f98496SJose Ricardo Ziviani 	u64 val = 0;
1623acc9eb93SSimon Guo 	unsigned int index = rs & KVM_MMIO_REG_MASK;
162409f98496SJose Ricardo Ziviani 	enum emulation_result emulated = EMULATE_DONE;
162509f98496SJose Ricardo Ziviani 
1626b99234b9SFabiano Rosas 	if (vcpu->arch.mmio_vmx_copy_nums > 2)
1627acc9eb93SSimon Guo 		return EMULATE_FAIL;
1628acc9eb93SSimon Guo 
162909f98496SJose Ricardo Ziviani 	vcpu->arch.io_gpr = rs;
163009f98496SJose Ricardo Ziviani 
163109f98496SJose Ricardo Ziviani 	while (vcpu->arch.mmio_vmx_copy_nums) {
1632acc9eb93SSimon Guo 		switch (vcpu->arch.mmio_copy_type) {
1633acc9eb93SSimon Guo 		case KVMPPC_VMX_COPY_DWORD:
1634acc9eb93SSimon Guo 			if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
163509f98496SJose Ricardo Ziviani 				return EMULATE_FAIL;
163609f98496SJose Ricardo Ziviani 
1637acc9eb93SSimon Guo 			break;
1638acc9eb93SSimon Guo 		case KVMPPC_VMX_COPY_WORD:
1639acc9eb93SSimon Guo 			if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
1640acc9eb93SSimon Guo 				return EMULATE_FAIL;
1641acc9eb93SSimon Guo 			break;
1642acc9eb93SSimon Guo 		case KVMPPC_VMX_COPY_HWORD:
1643acc9eb93SSimon Guo 			if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
1644acc9eb93SSimon Guo 				return EMULATE_FAIL;
1645acc9eb93SSimon Guo 			break;
1646acc9eb93SSimon Guo 		case KVMPPC_VMX_COPY_BYTE:
1647acc9eb93SSimon Guo 			if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
1648acc9eb93SSimon Guo 				return EMULATE_FAIL;
1649acc9eb93SSimon Guo 			break;
1650acc9eb93SSimon Guo 		default:
1651acc9eb93SSimon Guo 			return EMULATE_FAIL;
1652acc9eb93SSimon Guo 		}
1653acc9eb93SSimon Guo 
16548c99d345STianjia Zhang 		emulated = kvmppc_handle_store(vcpu, val, bytes,
165509f98496SJose Ricardo Ziviani 				is_default_endian);
165609f98496SJose Ricardo Ziviani 		if (emulated != EMULATE_DONE)
165709f98496SJose Ricardo Ziviani 			break;
165809f98496SJose Ricardo Ziviani 
16598c99d345STianjia Zhang 		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
166009f98496SJose Ricardo Ziviani 		vcpu->arch.mmio_vmx_copy_nums--;
1661acc9eb93SSimon Guo 		vcpu->arch.mmio_vmx_offset++;
166209f98496SJose Ricardo Ziviani 	}
166309f98496SJose Ricardo Ziviani 
166409f98496SJose Ricardo Ziviani 	return emulated;
166509f98496SJose Ricardo Ziviani }
166609f98496SJose Ricardo Ziviani 
kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu * vcpu)16678c99d345STianjia Zhang static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
166809f98496SJose Ricardo Ziviani {
16698c99d345STianjia Zhang 	struct kvm_run *run = vcpu->run;
167009f98496SJose Ricardo Ziviani 	enum emulation_result emulated = EMULATE_FAIL;
167109f98496SJose Ricardo Ziviani 	int r;
167209f98496SJose Ricardo Ziviani 
167309f98496SJose Ricardo Ziviani 	vcpu->arch.paddr_accessed += run->mmio.len;
167409f98496SJose Ricardo Ziviani 
167509f98496SJose Ricardo Ziviani 	if (!vcpu->mmio_is_write) {
16768c99d345STianjia Zhang 		emulated = kvmppc_handle_vmx_load(vcpu,
1677acc9eb93SSimon Guo 				vcpu->arch.io_gpr, run->mmio.len, 1);
167809f98496SJose Ricardo Ziviani 	} else {
16798c99d345STianjia Zhang 		emulated = kvmppc_handle_vmx_store(vcpu,
1680acc9eb93SSimon Guo 				vcpu->arch.io_gpr, run->mmio.len, 1);
168109f98496SJose Ricardo Ziviani 	}
168209f98496SJose Ricardo Ziviani 
168309f98496SJose Ricardo Ziviani 	switch (emulated) {
168409f98496SJose Ricardo Ziviani 	case EMULATE_DO_MMIO:
168509f98496SJose Ricardo Ziviani 		run->exit_reason = KVM_EXIT_MMIO;
168609f98496SJose Ricardo Ziviani 		r = RESUME_HOST;
168709f98496SJose Ricardo Ziviani 		break;
168809f98496SJose Ricardo Ziviani 	case EMULATE_FAIL:
168909f98496SJose Ricardo Ziviani 		pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
169009f98496SJose Ricardo Ziviani 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
169109f98496SJose Ricardo Ziviani 		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
169209f98496SJose Ricardo Ziviani 		r = RESUME_HOST;
169309f98496SJose Ricardo Ziviani 		break;
169409f98496SJose Ricardo Ziviani 	default:
169509f98496SJose Ricardo Ziviani 		r = RESUME_GUEST;
169609f98496SJose Ricardo Ziviani 		break;
169709f98496SJose Ricardo Ziviani 	}
169809f98496SJose Ricardo Ziviani 	return r;
169909f98496SJose Ricardo Ziviani }
170009f98496SJose Ricardo Ziviani #endif /* CONFIG_ALTIVEC */
170109f98496SJose Ricardo Ziviani 
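/*
 * ONE_REG accessors: kvmppc_get_one_reg()/kvmppc_set_one_reg() get
 * first shot at the register; IDs they reject with -EINVAL fall
 * through to the generic Altivec handling below (VR0-VR31, VSCR and
 * VRSAVE).
 */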
kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu * vcpu,struct kvm_one_reg * reg)17028a41ea53SMihai Caraman int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
17038a41ea53SMihai Caraman {
17048a41ea53SMihai Caraman 	int r = 0;
17058a41ea53SMihai Caraman 	union kvmppc_one_reg val;
17068a41ea53SMihai Caraman 	int size;
17078a41ea53SMihai Caraman 
17088a41ea53SMihai Caraman 	size = one_reg_size(reg->id);
17098a41ea53SMihai Caraman 	if (size > sizeof(val))
17108a41ea53SMihai Caraman 		return -EINVAL;
17118a41ea53SMihai Caraman 
17128a41ea53SMihai Caraman 	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
17138a41ea53SMihai Caraman 	if (r == -EINVAL) {
17148a41ea53SMihai Caraman 		r = 0;
17158a41ea53SMihai Caraman 		switch (reg->id) {
17163840edc8SMihai Caraman #ifdef CONFIG_ALTIVEC
17173840edc8SMihai Caraman 		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
17183840edc8SMihai Caraman 			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
17193840edc8SMihai Caraman 				r = -ENXIO;
17203840edc8SMihai Caraman 				break;
17213840edc8SMihai Caraman 			}
1722b4d7f161SGreg Kurz 			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
17233840edc8SMihai Caraman 			break;
17243840edc8SMihai Caraman 		case KVM_REG_PPC_VSCR:
17253840edc8SMihai Caraman 			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
17263840edc8SMihai Caraman 				r = -ENXIO;
17273840edc8SMihai Caraman 				break;
17283840edc8SMihai Caraman 			}
1729b4d7f161SGreg Kurz 			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
17303840edc8SMihai Caraman 			break;
17313840edc8SMihai Caraman 		case KVM_REG_PPC_VRSAVE:
1732b4d7f161SGreg Kurz 			val = get_reg_val(reg->id, vcpu->arch.vrsave);
17333840edc8SMihai Caraman 			break;
17343840edc8SMihai Caraman #endif /* CONFIG_ALTIVEC */
17358a41ea53SMihai Caraman 		default:
17368a41ea53SMihai Caraman 			r = -EINVAL;
17378a41ea53SMihai Caraman 			break;
17388a41ea53SMihai Caraman 		}
17398a41ea53SMihai Caraman 	}
17408a41ea53SMihai Caraman 
17418a41ea53SMihai Caraman 	if (r)
17428a41ea53SMihai Caraman 		return r;
17438a41ea53SMihai Caraman 
17448a41ea53SMihai Caraman 	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
17458a41ea53SMihai Caraman 		r = -EFAULT;
17468a41ea53SMihai Caraman 
17478a41ea53SMihai Caraman 	return r;
17488a41ea53SMihai Caraman }
17498a41ea53SMihai Caraman 
kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu * vcpu,struct kvm_one_reg * reg)17508a41ea53SMihai Caraman int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
17518a41ea53SMihai Caraman {
17528a41ea53SMihai Caraman 	int r;
17538a41ea53SMihai Caraman 	union kvmppc_one_reg val;
17548a41ea53SMihai Caraman 	int size;
17558a41ea53SMihai Caraman 
17568a41ea53SMihai Caraman 	size = one_reg_size(reg->id);
17578a41ea53SMihai Caraman 	if (size > sizeof(val))
17588a41ea53SMihai Caraman 		return -EINVAL;
17598a41ea53SMihai Caraman 
17608a41ea53SMihai Caraman 	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
17618a41ea53SMihai Caraman 		return -EFAULT;
17628a41ea53SMihai Caraman 
17638a41ea53SMihai Caraman 	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
17648a41ea53SMihai Caraman 	if (r == -EINVAL) {
17658a41ea53SMihai Caraman 		r = 0;
17668a41ea53SMihai Caraman 		switch (reg->id) {
17673840edc8SMihai Caraman #ifdef CONFIG_ALTIVEC
17683840edc8SMihai Caraman 		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
17693840edc8SMihai Caraman 			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
17703840edc8SMihai Caraman 				r = -ENXIO;
17713840edc8SMihai Caraman 				break;
17723840edc8SMihai Caraman 			}
1773b4d7f161SGreg Kurz 			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
17743840edc8SMihai Caraman 			break;
17753840edc8SMihai Caraman 		case KVM_REG_PPC_VSCR:
17763840edc8SMihai Caraman 			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
17773840edc8SMihai Caraman 				r = -ENXIO;
17783840edc8SMihai Caraman 				break;
17793840edc8SMihai Caraman 			}
1780b4d7f161SGreg Kurz 			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
17813840edc8SMihai Caraman 			break;
17823840edc8SMihai Caraman 		case KVM_REG_PPC_VRSAVE:
1783b4d7f161SGreg Kurz 			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1784b4d7f161SGreg Kurz 				r = -ENXIO;
1785b4d7f161SGreg Kurz 				break;
1786b4d7f161SGreg Kurz 			}
1787b4d7f161SGreg Kurz 			vcpu->arch.vrsave = set_reg_val(reg->id, val);
17883840edc8SMihai Caraman 			break;
17893840edc8SMihai Caraman #endif /* CONFIG_ALTIVEC */
17908a41ea53SMihai Caraman 		default:
17918a41ea53SMihai Caraman 			r = -EINVAL;
17928a41ea53SMihai Caraman 			break;
17938a41ea53SMihai Caraman 		}
17948a41ea53SMihai Caraman 	}
17958a41ea53SMihai Caraman 
17968a41ea53SMihai Caraman 	return r;
17978a41ea53SMihai Caraman }
17988a41ea53SMihai Caraman 
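/*
 * KVM_RUN entry point.  Results left pending by the previous exit
 * (MMIO data, OSI GPRs, hcall return values, EPR) are folded back
 * into the vcpu before it runs again; partially completed VSX/VMX
 * multi-element accesses may bounce straight back to userspace for
 * the next element instead of entering the guest.
 */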
kvm_arch_vcpu_ioctl_run(struct kvm_vcpu * vcpu)17991b94f6f8STianjia Zhang int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1800bbf45ba5SHollis Blanchard {
18011b94f6f8STianjia Zhang 	struct kvm_run *run = vcpu->run;
1802bbf45ba5SHollis Blanchard 	int r;
1803bbf45ba5SHollis Blanchard 
1804accb757dSChristoffer Dall 	vcpu_load(vcpu);
1805accb757dSChristoffer Dall 
1806bbf45ba5SHollis Blanchard 	if (vcpu->mmio_needed) {
18076f63e81bSBin Lu 		vcpu->mmio_needed = 0;
1808bbf45ba5SHollis Blanchard 		if (!vcpu->mmio_is_write)
18098c99d345STianjia Zhang 			kvmppc_complete_mmio_load(vcpu);
18106f63e81bSBin Lu #ifdef CONFIG_VSX
18116f63e81bSBin Lu 		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
18126f63e81bSBin Lu 			vcpu->arch.mmio_vsx_copy_nums--;
18136f63e81bSBin Lu 			vcpu->arch.mmio_vsx_offset++;
18146f63e81bSBin Lu 		}
18156f63e81bSBin Lu 
18166f63e81bSBin Lu 		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
18178c99d345STianjia Zhang 			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
18186f63e81bSBin Lu 			if (r == RESUME_HOST) {
18196f63e81bSBin Lu 				vcpu->mmio_needed = 1;
1820accb757dSChristoffer Dall 				goto out;
18216f63e81bSBin Lu 			}
18226f63e81bSBin Lu 		}
18236f63e81bSBin Lu #endif
182409f98496SJose Ricardo Ziviani #ifdef CONFIG_ALTIVEC
1825acc9eb93SSimon Guo 		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
182609f98496SJose Ricardo Ziviani 			vcpu->arch.mmio_vmx_copy_nums--;
1827acc9eb93SSimon Guo 			vcpu->arch.mmio_vmx_offset++;
1828acc9eb93SSimon Guo 		}
182909f98496SJose Ricardo Ziviani 
183009f98496SJose Ricardo Ziviani 		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
18318c99d345STianjia Zhang 			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
183209f98496SJose Ricardo Ziviani 			if (r == RESUME_HOST) {
183309f98496SJose Ricardo Ziviani 				vcpu->mmio_needed = 1;
18341ab03c07SRadim Krčmář 				goto out;
183509f98496SJose Ricardo Ziviani 			}
183609f98496SJose Ricardo Ziviani 		}
183709f98496SJose Ricardo Ziviani #endif
1838ad0a048bSAlexander Graf 	} else if (vcpu->arch.osi_needed) {
1839ad0a048bSAlexander Graf 		u64 *gprs = run->osi.gprs;
1840ad0a048bSAlexander Graf 		int i;
1841ad0a048bSAlexander Graf 
1842ad0a048bSAlexander Graf 		for (i = 0; i < 32; i++)
1843ad0a048bSAlexander Graf 			kvmppc_set_gpr(vcpu, i, gprs[i]);
1844ad0a048bSAlexander Graf 		vcpu->arch.osi_needed = 0;
1845de56a948SPaul Mackerras 	} else if (vcpu->arch.hcall_needed) {
1846de56a948SPaul Mackerras 		int i;
1847de56a948SPaul Mackerras 
1848de56a948SPaul Mackerras 		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
1849de56a948SPaul Mackerras 		for (i = 0; i < 9; ++i)
1850de56a948SPaul Mackerras 			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
1851de56a948SPaul Mackerras 		vcpu->arch.hcall_needed = 0;
18521c810636SAlexander Graf #ifdef CONFIG_BOOKE
18531c810636SAlexander Graf 	} else if (vcpu->arch.epr_needed) {
18541c810636SAlexander Graf 		kvmppc_set_epr(vcpu, run->epr.epr);
18551c810636SAlexander Graf 		vcpu->arch.epr_needed = 0;
18561c810636SAlexander Graf #endif
1857bbf45ba5SHollis Blanchard 	}
1858bbf45ba5SHollis Blanchard 
185920b7035cSJan H. Schönherr 	kvm_sigset_activate(vcpu);
18606f63e81bSBin Lu 
1861460df4c1SPaolo Bonzini 	if (run->immediate_exit)
1862460df4c1SPaolo Bonzini 		r = -EINTR;
1863460df4c1SPaolo Bonzini 	else
18648c99d345STianjia Zhang 		r = kvmppc_vcpu_run(vcpu);
1865bbf45ba5SHollis Blanchard 
186620b7035cSJan H. Schönherr 	kvm_sigset_deactivate(vcpu);
1867bbf45ba5SHollis Blanchard 
1868c662f773SPaul Mackerras #ifdef CONFIG_ALTIVEC
1869accb757dSChristoffer Dall out:
1870c662f773SPaul Mackerras #endif
187136d014d3SFabiano Rosas 
187236d014d3SFabiano Rosas 	/*
187336d014d3SFabiano Rosas 	 * We're already returning to userspace, so don't pass the
187436d014d3SFabiano Rosas 	 * RESUME_HOST flags along.
187536d014d3SFabiano Rosas 	 */
187636d014d3SFabiano Rosas 	if (r > 0)
187736d014d3SFabiano Rosas 		r = 0;
187836d014d3SFabiano Rosas 
1879accb757dSChristoffer Dall 	vcpu_put(vcpu);
1880bbf45ba5SHollis Blanchard 	return r;
1881bbf45ba5SHollis Blanchard }
1882bbf45ba5SHollis Blanchard 
kvm_vcpu_ioctl_interrupt(struct kvm_vcpu * vcpu,struct kvm_interrupt * irq)1883bbf45ba5SHollis Blanchard int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1884bbf45ba5SHollis Blanchard {
188519ccb76aSPaul Mackerras 	if (irq->irq == KVM_INTERRUPT_UNSET) {
18864fe27d2aSPaul Mackerras 		kvmppc_core_dequeue_external(vcpu);
188719ccb76aSPaul Mackerras 		return 0;
188819ccb76aSPaul Mackerras 	}
188919ccb76aSPaul Mackerras 
18909dd921cfSHollis Blanchard 	kvmppc_core_queue_external(vcpu, irq);
1891b6d33834SChristoffer Dall 
1892dfd4d47eSScott Wood 	kvm_vcpu_kick(vcpu);
189345c5eb67SHollis Blanchard 
1894bbf45ba5SHollis Blanchard 	return 0;
1895bbf45ba5SHollis Blanchard }
1896bbf45ba5SHollis Blanchard 
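/*
 * Per-vcpu capability enablement.  Most capabilities just record a
 * flag on the vcpu; the irqchip ones (MPIC, XICS, XIVE) additionally
 * connect the vcpu to an in-kernel device passed in as a file
 * descriptor.  Whatever was changed, the vcpu is re-validated with
 * kvmppc_sanity_check() before success is returned.
 */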
kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu * vcpu,struct kvm_enable_cap * cap)189771fbfd5fSAlexander Graf static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
189871fbfd5fSAlexander Graf 				     struct kvm_enable_cap *cap)
189971fbfd5fSAlexander Graf {
190071fbfd5fSAlexander Graf 	int r;
190171fbfd5fSAlexander Graf 
190271fbfd5fSAlexander Graf 	if (cap->flags)
190371fbfd5fSAlexander Graf 		return -EINVAL;
190471fbfd5fSAlexander Graf 
190571fbfd5fSAlexander Graf 	switch (cap->cap) {
1906ad0a048bSAlexander Graf 	case KVM_CAP_PPC_OSI:
1907ad0a048bSAlexander Graf 		r = 0;
1908ad0a048bSAlexander Graf 		vcpu->arch.osi_enabled = true;
1909ad0a048bSAlexander Graf 		break;
1910930b412aSAlexander Graf 	case KVM_CAP_PPC_PAPR:
1911930b412aSAlexander Graf 		r = 0;
1912930b412aSAlexander Graf 		vcpu->arch.papr_enabled = true;
1913930b412aSAlexander Graf 		break;
19141c810636SAlexander Graf 	case KVM_CAP_PPC_EPR:
19151c810636SAlexander Graf 		r = 0;
19165df554adSScott Wood 		if (cap->args[0])
19175df554adSScott Wood 			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
19185df554adSScott Wood 		else
19195df554adSScott Wood 			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
19201c810636SAlexander Graf 		break;
1921f61c94bbSBharat Bhushan #ifdef CONFIG_BOOKE
1922f61c94bbSBharat Bhushan 	case KVM_CAP_PPC_BOOKE_WATCHDOG:
1923f61c94bbSBharat Bhushan 		r = 0;
1924f61c94bbSBharat Bhushan 		vcpu->arch.watchdog_enabled = true;
1925f61c94bbSBharat Bhushan 		break;
1926f61c94bbSBharat Bhushan #endif
1927bf7ca4bdSAlexander Graf #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
1928dc83b8bcSScott Wood 	case KVM_CAP_SW_TLB: {
1929dc83b8bcSScott Wood 		struct kvm_config_tlb cfg;
1930dc83b8bcSScott Wood 		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
1931dc83b8bcSScott Wood 
1932dc83b8bcSScott Wood 		r = -EFAULT;
1933dc83b8bcSScott Wood 		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
1934dc83b8bcSScott Wood 			break;
1935dc83b8bcSScott Wood 
1936dc83b8bcSScott Wood 		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
1937dc83b8bcSScott Wood 		break;
1938dc83b8bcSScott Wood 	}
1939dc83b8bcSScott Wood #endif
1940eb1e4f43SScott Wood #ifdef CONFIG_KVM_MPIC
1941eb1e4f43SScott Wood 	case KVM_CAP_IRQ_MPIC: {
194270abadedSAl Viro 		struct fd f;
1943eb1e4f43SScott Wood 		struct kvm_device *dev;
1944eb1e4f43SScott Wood 
1945eb1e4f43SScott Wood 		r = -EBADF;
194670abadedSAl Viro 		f = fdget(cap->args[0]);
194770abadedSAl Viro 		if (!f.file)
1948eb1e4f43SScott Wood 			break;
1949eb1e4f43SScott Wood 
1950eb1e4f43SScott Wood 		r = -EPERM;
195170abadedSAl Viro 		dev = kvm_device_from_filp(f.file);
1952eb1e4f43SScott Wood 		if (dev)
1953eb1e4f43SScott Wood 			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
1954eb1e4f43SScott Wood 
195570abadedSAl Viro 		fdput(f);
1956eb1e4f43SScott Wood 		break;
1957eb1e4f43SScott Wood 	}
1958eb1e4f43SScott Wood #endif
19595975a2e0SPaul Mackerras #ifdef CONFIG_KVM_XICS
19605975a2e0SPaul Mackerras 	case KVM_CAP_IRQ_XICS: {
196170abadedSAl Viro 		struct fd f;
19625975a2e0SPaul Mackerras 		struct kvm_device *dev;
19635975a2e0SPaul Mackerras 
19645975a2e0SPaul Mackerras 		r = -EBADF;
196570abadedSAl Viro 		f = fdget(cap->args[0]);
196670abadedSAl Viro 		if (!f.file)
19675975a2e0SPaul Mackerras 			break;
19685975a2e0SPaul Mackerras 
19695975a2e0SPaul Mackerras 		r = -EPERM;
197070abadedSAl Viro 		dev = kvm_device_from_filp(f.file);
19715af50993SBenjamin Herrenschmidt 		if (dev) {
197203f95332SPaul Mackerras 			if (xics_on_xive())
19735af50993SBenjamin Herrenschmidt 				r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
19745af50993SBenjamin Herrenschmidt 			else
19755975a2e0SPaul Mackerras 				r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
19765af50993SBenjamin Herrenschmidt 		}
19775975a2e0SPaul Mackerras 
197870abadedSAl Viro 		fdput(f);
19795975a2e0SPaul Mackerras 		break;
19805975a2e0SPaul Mackerras 	}
19815975a2e0SPaul Mackerras #endif /* CONFIG_KVM_XICS */
1982eacc56bbSCédric Le Goater #ifdef CONFIG_KVM_XIVE
1983eacc56bbSCédric Le Goater 	case KVM_CAP_PPC_IRQ_XIVE: {
1984eacc56bbSCédric Le Goater 		struct fd f;
1985eacc56bbSCédric Le Goater 		struct kvm_device *dev;
1986eacc56bbSCédric Le Goater 
1987eacc56bbSCédric Le Goater 		r = -EBADF;
1988eacc56bbSCédric Le Goater 		f = fdget(cap->args[0]);
1989eacc56bbSCédric Le Goater 		if (!f.file)
1990eacc56bbSCédric Le Goater 			break;
1991eacc56bbSCédric Le Goater 
1992eacc56bbSCédric Le Goater 		r = -ENXIO;
1993*4f79a18aSAl Viro 		if (!xive_enabled()) {
1994*4f79a18aSAl Viro 			fdput(f);
1995eacc56bbSCédric Le Goater 			break;
1996*4f79a18aSAl Viro 		}
1997eacc56bbSCédric Le Goater 
1998eacc56bbSCédric Le Goater 		r = -EPERM;
1999eacc56bbSCédric Le Goater 		dev = kvm_device_from_filp(f.file);
2000eacc56bbSCédric Le Goater 		if (dev)
2001eacc56bbSCédric Le Goater 			r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
2002eacc56bbSCédric Le Goater 							    cap->args[1]);
2003eacc56bbSCédric Le Goater 
2004eacc56bbSCédric Le Goater 		fdput(f);
2005eacc56bbSCédric Le Goater 		break;
2006eacc56bbSCédric Le Goater 	}
2007eacc56bbSCédric Le Goater #endif /* CONFIG_KVM_XIVE */
2008134764edSAravinda Prasad #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
2009134764edSAravinda Prasad 	case KVM_CAP_PPC_FWNMI:
2010134764edSAravinda Prasad 		r = -EINVAL;
2011134764edSAravinda Prasad 		if (!is_kvmppc_hv_enabled(vcpu->kvm))
2012134764edSAravinda Prasad 			break;
2013134764edSAravinda Prasad 		r = 0;
2014134764edSAravinda Prasad 		vcpu->kvm->arch.fwnmi_enabled = true;
2015134764edSAravinda Prasad 		break;
2016134764edSAravinda Prasad #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
201771fbfd5fSAlexander Graf 	default:
201871fbfd5fSAlexander Graf 		r = -EINVAL;
201971fbfd5fSAlexander Graf 		break;
202071fbfd5fSAlexander Graf 	}
202171fbfd5fSAlexander Graf 
2022af8f38b3SAlexander Graf 	if (!r)
2023af8f38b3SAlexander Graf 		r = kvmppc_sanity_check(vcpu);
2024af8f38b3SAlexander Graf 
202571fbfd5fSAlexander Graf 	return r;
202671fbfd5fSAlexander Graf }
202771fbfd5fSAlexander Graf 
kvm_arch_intc_initialized(struct kvm * kvm)202834a75b0fSPaul Mackerras bool kvm_arch_intc_initialized(struct kvm *kvm)
202934a75b0fSPaul Mackerras {
203034a75b0fSPaul Mackerras #ifdef CONFIG_KVM_MPIC
203134a75b0fSPaul Mackerras 	if (kvm->arch.mpic)
203234a75b0fSPaul Mackerras 		return true;
203334a75b0fSPaul Mackerras #endif
203434a75b0fSPaul Mackerras #ifdef CONFIG_KVM_XICS
20355af50993SBenjamin Herrenschmidt 	if (kvm->arch.xics || kvm->arch.xive)
203634a75b0fSPaul Mackerras 		return true;
203734a75b0fSPaul Mackerras #endif
203834a75b0fSPaul Mackerras 	return false;
203934a75b0fSPaul Mackerras }
204034a75b0fSPaul Mackerras 
kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu * vcpu,struct kvm_mp_state * mp_state)2041bbf45ba5SHollis Blanchard int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2042bbf45ba5SHollis Blanchard                                     struct kvm_mp_state *mp_state)
2043bbf45ba5SHollis Blanchard {
2044bbf45ba5SHollis Blanchard 	return -EINVAL;
2045bbf45ba5SHollis Blanchard }
2046bbf45ba5SHollis Blanchard 
kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu * vcpu,struct kvm_mp_state * mp_state)2047bbf45ba5SHollis Blanchard int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2048bbf45ba5SHollis Blanchard                                     struct kvm_mp_state *mp_state)
2049bbf45ba5SHollis Blanchard {
2050bbf45ba5SHollis Blanchard 	return -EINVAL;
2051bbf45ba5SHollis Blanchard }
2052bbf45ba5SHollis Blanchard 
kvm_arch_vcpu_async_ioctl(struct file * filp,unsigned int ioctl,unsigned long arg)20535cb0944cSPaolo Bonzini long kvm_arch_vcpu_async_ioctl(struct file *filp,
2054bbf45ba5SHollis Blanchard 			       unsigned int ioctl, unsigned long arg)
2055bbf45ba5SHollis Blanchard {
2056bbf45ba5SHollis Blanchard 	struct kvm_vcpu *vcpu = filp->private_data;
2057bbf45ba5SHollis Blanchard 	void __user *argp = (void __user *)arg;
2058bbf45ba5SHollis Blanchard 
20599b062471SChristoffer Dall 	if (ioctl == KVM_INTERRUPT) {
2060bbf45ba5SHollis Blanchard 		struct kvm_interrupt irq;
2061bbf45ba5SHollis Blanchard 		if (copy_from_user(&irq, argp, sizeof(irq)))
20629b062471SChristoffer Dall 			return -EFAULT;
20639b062471SChristoffer Dall 		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2064bbf45ba5SHollis Blanchard 	}
20655cb0944cSPaolo Bonzini 	return -ENOIOCTLCMD;
20665cb0944cSPaolo Bonzini }
20675cb0944cSPaolo Bonzini 
kvm_arch_vcpu_ioctl(struct file * filp,unsigned int ioctl,unsigned long arg)20685cb0944cSPaolo Bonzini long kvm_arch_vcpu_ioctl(struct file *filp,
20695cb0944cSPaolo Bonzini                          unsigned int ioctl, unsigned long arg)
20705cb0944cSPaolo Bonzini {
20715cb0944cSPaolo Bonzini 	struct kvm_vcpu *vcpu = filp->private_data;
20725cb0944cSPaolo Bonzini 	void __user *argp = (void __user *)arg;
20735cb0944cSPaolo Bonzini 	long r;
207419483d14SAvi Kivity 
20759b062471SChristoffer Dall 	switch (ioctl) {
207671fbfd5fSAlexander Graf 	case KVM_ENABLE_CAP:
207771fbfd5fSAlexander Graf 	{
207871fbfd5fSAlexander Graf 		struct kvm_enable_cap cap;
207971fbfd5fSAlexander Graf 		r = -EFAULT;
208071fbfd5fSAlexander Graf 		if (copy_from_user(&cap, argp, sizeof(cap)))
208171fbfd5fSAlexander Graf 			goto out;
2082bc4188a2SNicholas Piggin 		vcpu_load(vcpu);
208371fbfd5fSAlexander Graf 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2084b3cebfe8SSimon Guo 		vcpu_put(vcpu);
208571fbfd5fSAlexander Graf 		break;
208671fbfd5fSAlexander Graf 	}
2087dc83b8bcSScott Wood 
2088e24ed81fSAlexander Graf 	case KVM_SET_ONE_REG:
2089e24ed81fSAlexander Graf 	case KVM_GET_ONE_REG:
2090e24ed81fSAlexander Graf 	{
2091e24ed81fSAlexander Graf 		struct kvm_one_reg reg;
2092e24ed81fSAlexander Graf 		r = -EFAULT;
2093e24ed81fSAlexander Graf 		if (copy_from_user(&reg, argp, sizeof(reg)))
2094e24ed81fSAlexander Graf 			goto out;
2095e24ed81fSAlexander Graf 		if (ioctl == KVM_SET_ONE_REG)
2096e24ed81fSAlexander Graf 			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
2097e24ed81fSAlexander Graf 		else
2098e24ed81fSAlexander Graf 			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
2099e24ed81fSAlexander Graf 		break;
2100e24ed81fSAlexander Graf 	}
2101e24ed81fSAlexander Graf 
2102bf7ca4bdSAlexander Graf #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
2103dc83b8bcSScott Wood 	case KVM_DIRTY_TLB: {
2104dc83b8bcSScott Wood 		struct kvm_dirty_tlb dirty;
2105dc83b8bcSScott Wood 		r = -EFAULT;
2106dc83b8bcSScott Wood 		if (copy_from_user(&dirty, argp, sizeof(dirty)))
2107dc83b8bcSScott Wood 			goto out;
2108bc4188a2SNicholas Piggin 		vcpu_load(vcpu);
2109dc83b8bcSScott Wood 		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
2110b3cebfe8SSimon Guo 		vcpu_put(vcpu);
2111dc83b8bcSScott Wood 		break;
2112dc83b8bcSScott Wood 	}
2113dc83b8bcSScott Wood #endif
2114bbf45ba5SHollis Blanchard 	default:
2115bbf45ba5SHollis Blanchard 		r = -EINVAL;
2116bbf45ba5SHollis Blanchard 	}
2117bbf45ba5SHollis Blanchard 
2118bbf45ba5SHollis Blanchard out:
2119bbf45ba5SHollis Blanchard 	return r;
2120bbf45ba5SHollis Blanchard }
2121bbf45ba5SHollis Blanchard 
kvm_arch_vcpu_fault(struct kvm_vcpu * vcpu,struct vm_fault * vmf)21221499fa80SSouptick Joarder vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
21235b1c1493SCarsten Otte {
21245b1c1493SCarsten Otte 	return VM_FAULT_SIGBUS;
21255b1c1493SCarsten Otte }
21265b1c1493SCarsten Otte 
kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo * pvinfo)212715711e9cSAlexander Graf static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
212815711e9cSAlexander Graf {
2129784bafacSStuart Yoder 	u32 inst_nop = 0x60000000;
2130784bafacSStuart Yoder #ifdef CONFIG_KVM_BOOKE_HV
2131784bafacSStuart Yoder 	u32 inst_sc1 = 0x44000022;
21322743103fSAlexander Graf 	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
21332743103fSAlexander Graf 	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
21342743103fSAlexander Graf 	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
21352743103fSAlexander Graf 	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2136784bafacSStuart Yoder #else
213715711e9cSAlexander Graf 	u32 inst_lis = 0x3c000000;
213815711e9cSAlexander Graf 	u32 inst_ori = 0x60000000;
213915711e9cSAlexander Graf 	u32 inst_sc = 0x44000002;
214015711e9cSAlexander Graf 	u32 inst_imm_mask = 0xffff;
214115711e9cSAlexander Graf 
214215711e9cSAlexander Graf 	/*
214315711e9cSAlexander Graf 	 * The hypercall to get into KVM from within guest context is as
214415711e9cSAlexander Graf 	 * follows:
214515711e9cSAlexander Graf 	 *
214615711e9cSAlexander Graf 	 *    lis r0, KVM_SC_MAGIC_R0@h
214715711e9cSAlexander Graf 	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
214815711e9cSAlexander Graf 	 *    sc
214915711e9cSAlexander Graf 	 *    nop
215015711e9cSAlexander Graf 	 */
21512743103fSAlexander Graf 	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
21522743103fSAlexander Graf 	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
21532743103fSAlexander Graf 	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
21542743103fSAlexander Graf 	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2155784bafacSStuart Yoder #endif
215615711e9cSAlexander Graf 
21579202e076SLiu Yu-B13201 	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;
21589202e076SLiu Yu-B13201 
215915711e9cSAlexander Graf 	return 0;
216015711e9cSAlexander Graf }
216115711e9cSAlexander Graf 
kvm_arch_irqchip_in_kernel(struct kvm * kvm)2162d663b8a2SPaolo Bonzini bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
2163d663b8a2SPaolo Bonzini {
2164d663b8a2SPaolo Bonzini 	int ret = 0;
2165d663b8a2SPaolo Bonzini 
2166d663b8a2SPaolo Bonzini #ifdef CONFIG_KVM_MPIC
2167d663b8a2SPaolo Bonzini 	ret = ret || (kvm->arch.mpic != NULL);
2168d663b8a2SPaolo Bonzini #endif
2169d663b8a2SPaolo Bonzini #ifdef CONFIG_KVM_XICS
2170d663b8a2SPaolo Bonzini 	ret = ret || (kvm->arch.xics != NULL);
2171d663b8a2SPaolo Bonzini 	ret = ret || (kvm->arch.xive != NULL);
2172d663b8a2SPaolo Bonzini #endif
2173d663b8a2SPaolo Bonzini 	smp_rmb();
2174d663b8a2SPaolo Bonzini 	return ret;
2175d663b8a2SPaolo Bonzini }
2176d663b8a2SPaolo Bonzini 
kvm_vm_ioctl_irq_line(struct kvm * kvm,struct kvm_irq_level * irq_event,bool line_status)21775efdb4beSAlexander Graf int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
21785efdb4beSAlexander Graf 			  bool line_status)
21795efdb4beSAlexander Graf {
2180d663b8a2SPaolo Bonzini 	if (!kvm_arch_irqchip_in_kernel(kvm))
21815efdb4beSAlexander Graf 		return -ENXIO;
21825efdb4beSAlexander Graf 
21835efdb4beSAlexander Graf 	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
21845efdb4beSAlexander Graf 					irq_event->irq, irq_event->level,
21855efdb4beSAlexander Graf 					line_status);
21865efdb4beSAlexander Graf 	return 0;
21875efdb4beSAlexander Graf }
21885efdb4beSAlexander Graf 
2189699a0ea0SPaul Mackerras 
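/*
 * VM-wide capability enablement: KVM_CAP_PPC_ENABLE_HCALL toggles
 * individual hypercalls in the enabled_hcalls bitmap, while the SMT
 * mode, nested HV, secure guest and second-DAWR capabilities are
 * forwarded to the corresponding kvm_ops hooks when the backend
 * provides them.
 */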
kvm_vm_ioctl_enable_cap(struct kvm * kvm,struct kvm_enable_cap * cap)2190e5d83c74SPaolo Bonzini int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
2191699a0ea0SPaul Mackerras 			    struct kvm_enable_cap *cap)
2192699a0ea0SPaul Mackerras {
2193699a0ea0SPaul Mackerras 	int r;
2194699a0ea0SPaul Mackerras 
2195699a0ea0SPaul Mackerras 	if (cap->flags)
2196699a0ea0SPaul Mackerras 		return -EINVAL;
2197699a0ea0SPaul Mackerras 
2198699a0ea0SPaul Mackerras 	switch (cap->cap) {
2199699a0ea0SPaul Mackerras #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
2200699a0ea0SPaul Mackerras 	case KVM_CAP_PPC_ENABLE_HCALL: {
2201699a0ea0SPaul Mackerras 		unsigned long hcall = cap->args[0];
2202699a0ea0SPaul Mackerras 
2203699a0ea0SPaul Mackerras 		r = -EINVAL;
2204699a0ea0SPaul Mackerras 		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
2205699a0ea0SPaul Mackerras 		    cap->args[1] > 1)
2206699a0ea0SPaul Mackerras 			break;
2207ae2113a4SPaul Mackerras 		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
2208ae2113a4SPaul Mackerras 			break;
2209699a0ea0SPaul Mackerras 		if (cap->args[1])
2210699a0ea0SPaul Mackerras 			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
2211699a0ea0SPaul Mackerras 		else
2212699a0ea0SPaul Mackerras 			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
2213699a0ea0SPaul Mackerras 		r = 0;
2214699a0ea0SPaul Mackerras 		break;
2215699a0ea0SPaul Mackerras 	}
22163c313524SPaul Mackerras 	case KVM_CAP_PPC_SMT: {
22173c313524SPaul Mackerras 		unsigned long mode = cap->args[0];
22183c313524SPaul Mackerras 		unsigned long flags = cap->args[1];
22193c313524SPaul Mackerras 
22203c313524SPaul Mackerras 		r = -EINVAL;
22213c313524SPaul Mackerras 		if (kvm->arch.kvm_ops->set_smt_mode)
22223c313524SPaul Mackerras 			r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
22233c313524SPaul Mackerras 		break;
22243c313524SPaul Mackerras 	}
2225aa069a99SPaul Mackerras 
2226aa069a99SPaul Mackerras 	case KVM_CAP_PPC_NESTED_HV:
2227aa069a99SPaul Mackerras 		r = -EINVAL;
2228aa069a99SPaul Mackerras 		if (!is_kvmppc_hv_enabled(kvm) ||
2229aa069a99SPaul Mackerras 		    !kvm->arch.kvm_ops->enable_nested)
2230aa069a99SPaul Mackerras 			break;
2231aa069a99SPaul Mackerras 		r = kvm->arch.kvm_ops->enable_nested(kvm);
2232aa069a99SPaul Mackerras 		break;
2233699a0ea0SPaul Mackerras #endif
22349a5788c6SPaul Mackerras #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
22359a5788c6SPaul Mackerras 	case KVM_CAP_PPC_SECURE_GUEST:
22369a5788c6SPaul Mackerras 		r = -EINVAL;
22379a5788c6SPaul Mackerras 		if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
22389a5788c6SPaul Mackerras 			break;
22399a5788c6SPaul Mackerras 		r = kvm->arch.kvm_ops->enable_svm(kvm);
22409a5788c6SPaul Mackerras 		break;
2241d9a47edaSRavi Bangoria 	case KVM_CAP_PPC_DAWR1:
2242d9a47edaSRavi Bangoria 		r = -EINVAL;
2243d9a47edaSRavi Bangoria 		if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1)
2244d9a47edaSRavi Bangoria 			break;
2245d9a47edaSRavi Bangoria 		r = kvm->arch.kvm_ops->enable_dawr1(kvm);
2246d9a47edaSRavi Bangoria 		break;
22479a5788c6SPaul Mackerras #endif
2248699a0ea0SPaul Mackerras 	default:
2249699a0ea0SPaul Mackerras 		r = -EINVAL;
2250699a0ea0SPaul Mackerras 		break;
2251699a0ea0SPaul Mackerras 	}
2252699a0ea0SPaul Mackerras 
2253699a0ea0SPaul Mackerras 	return r;
2254699a0ea0SPaul Mackerras }
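/*
 * Illustrative userspace sketch (not part of this file) of enabling
 * in-kernel handling of an hcall via the handler above.  "vm_fd" is assumed
 * to be a VM file descriptor obtained from KVM_CREATE_VM:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_PPC_ENABLE_HCALL,
 *		.args = { H_LOGICAL_CI_LOAD, 1 },
 *	};
 *
 *	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		perror("KVM_ENABLE_CAP");
 */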
2255699a0ea0SPaul Mackerras 
22563214d01fSPaul Mackerras #ifdef CONFIG_PPC_BOOK3S_64
22573214d01fSPaul Mackerras /*
22583214d01fSPaul Mackerras  * These functions check whether the underlying hardware is safe
22593214d01fSPaul Mackerras  * against attacks based on observing the effects of speculatively
22603214d01fSPaul Mackerras  * executed instructions, and whether it supplies instructions for
22613214d01fSPaul Mackerras  * use in workarounds.  The information comes from firmware, either
22623214d01fSPaul Mackerras  * via the device tree on powernv platforms or from an hcall on
22633214d01fSPaul Mackerras  * pseries platforms.
22643214d01fSPaul Mackerras  */
22653214d01fSPaul Mackerras #ifdef CONFIG_PPC_PSERIES
22663214d01fSPaul Mackerras static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
22673214d01fSPaul Mackerras {
22683214d01fSPaul Mackerras 	struct h_cpu_char_result c;
22693214d01fSPaul Mackerras 	unsigned long rc;
22703214d01fSPaul Mackerras 
22713214d01fSPaul Mackerras 	if (!machine_is(pseries))
22723214d01fSPaul Mackerras 		return -ENOTTY;
22733214d01fSPaul Mackerras 
22743214d01fSPaul Mackerras 	rc = plpar_get_cpu_characteristics(&c);
22753214d01fSPaul Mackerras 	if (rc == H_SUCCESS) {
22763214d01fSPaul Mackerras 		cp->character = c.character;
22773214d01fSPaul Mackerras 		cp->behaviour = c.behaviour;
22783214d01fSPaul Mackerras 		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
22793214d01fSPaul Mackerras 			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
22803214d01fSPaul Mackerras 			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
22813214d01fSPaul Mackerras 			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
22823214d01fSPaul Mackerras 			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
22833214d01fSPaul Mackerras 			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
22843214d01fSPaul Mackerras 			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
22852b57ecd0SSuraj Jitindar Singh 			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
22862b57ecd0SSuraj Jitindar Singh 			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
22873214d01fSPaul Mackerras 		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
22883214d01fSPaul Mackerras 			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
22892b57ecd0SSuraj Jitindar Singh 			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
22902b57ecd0SSuraj Jitindar Singh 			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
22913214d01fSPaul Mackerras 	}
22923214d01fSPaul Mackerras 	return 0;
22933214d01fSPaul Mackerras }
22943214d01fSPaul Mackerras #else
22953214d01fSPaul Mackerras static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
22963214d01fSPaul Mackerras {
22973214d01fSPaul Mackerras 	return -ENOTTY;
22983214d01fSPaul Mackerras }
22993214d01fSPaul Mackerras #endif
23003214d01fSPaul Mackerras 
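/*
 * Check whether the named child node of the fw-features node carries the
 * given state property ("enabled" or "disabled").
 */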
23013214d01fSPaul Mackerras static inline bool have_fw_feat(struct device_node *fw_features,
23023214d01fSPaul Mackerras 				const char *state, const char *name)
23033214d01fSPaul Mackerras {
23043214d01fSPaul Mackerras 	struct device_node *np;
23053214d01fSPaul Mackerras 	bool r = false;
23063214d01fSPaul Mackerras 
23073214d01fSPaul Mackerras 	np = of_get_child_by_name(fw_features, name);
23083214d01fSPaul Mackerras 	if (np) {
23093214d01fSPaul Mackerras 		r = of_property_read_bool(np, state);
23103214d01fSPaul Mackerras 		of_node_put(np);
23113214d01fSPaul Mackerras 	}
23123214d01fSPaul Mackerras 	return r;
23133214d01fSPaul Mackerras }
23143214d01fSPaul Mackerras 
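/*
 * Fill in *cp for KVM_PPC_GET_CPU_CHAR: try the pseries hcall first, and
 * fall back to parsing the ibm,opal/fw-features device tree nodes on
 * powernv.
 */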
23153214d01fSPaul Mackerras static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
23163214d01fSPaul Mackerras {
23173214d01fSPaul Mackerras 	struct device_node *np, *fw_features;
23183214d01fSPaul Mackerras 	int r;
23193214d01fSPaul Mackerras 
23203214d01fSPaul Mackerras 	memset(cp, 0, sizeof(*cp));
23213214d01fSPaul Mackerras 	r = pseries_get_cpu_char(cp);
23223214d01fSPaul Mackerras 	if (r != -ENOTTY)
23233214d01fSPaul Mackerras 		return r;
23243214d01fSPaul Mackerras 
23253214d01fSPaul Mackerras 	np = of_find_node_by_name(NULL, "ibm,opal");
23263214d01fSPaul Mackerras 	if (np) {
23273214d01fSPaul Mackerras 		fw_features = of_get_child_by_name(np, "fw-features");
23283214d01fSPaul Mackerras 		of_node_put(np);
23293214d01fSPaul Mackerras 		if (!fw_features)
23303214d01fSPaul Mackerras 			return 0;
23313214d01fSPaul Mackerras 		if (have_fw_feat(fw_features, "enabled",
23323214d01fSPaul Mackerras 				 "inst-spec-barrier-ori31,31,0"))
23333214d01fSPaul Mackerras 			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
23343214d01fSPaul Mackerras 		if (have_fw_feat(fw_features, "enabled",
23353214d01fSPaul Mackerras 				 "fw-bcctrl-serialized"))
23363214d01fSPaul Mackerras 			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
23373214d01fSPaul Mackerras 		if (have_fw_feat(fw_features, "enabled",
23383214d01fSPaul Mackerras 				 "inst-l1d-flush-ori30,30,0"))
23393214d01fSPaul Mackerras 			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
23403214d01fSPaul Mackerras 		if (have_fw_feat(fw_features, "enabled",
23413214d01fSPaul Mackerras 				 "inst-l1d-flush-trig2"))
23423214d01fSPaul Mackerras 			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
23433214d01fSPaul Mackerras 		if (have_fw_feat(fw_features, "enabled",
23443214d01fSPaul Mackerras 				 "fw-l1d-thread-split"))
23453214d01fSPaul Mackerras 			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
23463214d01fSPaul Mackerras 		if (have_fw_feat(fw_features, "enabled",
23473214d01fSPaul Mackerras 				 "fw-count-cache-disabled"))
23483214d01fSPaul Mackerras 			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
23492b57ecd0SSuraj Jitindar Singh 		if (have_fw_feat(fw_features, "enabled",
23502b57ecd0SSuraj Jitindar Singh 				 "fw-count-cache-flush-bcctr2,0,0"))
23512b57ecd0SSuraj Jitindar Singh 			cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
23523214d01fSPaul Mackerras 		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
23533214d01fSPaul Mackerras 			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
23543214d01fSPaul Mackerras 			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
23553214d01fSPaul Mackerras 			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
23563214d01fSPaul Mackerras 			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
23572b57ecd0SSuraj Jitindar Singh 			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
23582b57ecd0SSuraj Jitindar Singh 			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
23593214d01fSPaul Mackerras 
23603214d01fSPaul Mackerras 		if (have_fw_feat(fw_features, "enabled",
23613214d01fSPaul Mackerras 				 "speculation-policy-favor-security"))
23623214d01fSPaul Mackerras 			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
23633214d01fSPaul Mackerras 		if (!have_fw_feat(fw_features, "disabled",
23643214d01fSPaul Mackerras 				  "needs-l1d-flush-msr-pr-0-to-1"))
23653214d01fSPaul Mackerras 			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
23663214d01fSPaul Mackerras 		if (!have_fw_feat(fw_features, "disabled",
23673214d01fSPaul Mackerras 				  "needs-spec-barrier-for-bound-checks"))
23683214d01fSPaul Mackerras 			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
23692b57ecd0SSuraj Jitindar Singh 		if (have_fw_feat(fw_features, "enabled",
23702b57ecd0SSuraj Jitindar Singh 				 "needs-count-cache-flush-on-context-switch"))
23712b57ecd0SSuraj Jitindar Singh 			cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
23723214d01fSPaul Mackerras 		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
23733214d01fSPaul Mackerras 			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
23742b57ecd0SSuraj Jitindar Singh 			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
23752b57ecd0SSuraj Jitindar Singh 			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
23763214d01fSPaul Mackerras 
23773214d01fSPaul Mackerras 		of_node_put(fw_features);
23783214d01fSPaul Mackerras 	}
23793214d01fSPaul Mackerras 
23803214d01fSPaul Mackerras 	return 0;
23813214d01fSPaul Mackerras }
23823214d01fSPaul Mackerras #endif
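/*
 * Illustrative userspace sketch (not part of this file) of querying the CPU
 * character bits assembled by kvmppc_get_cpu_char().  "vm_fd" is assumed to
 * be a VM file descriptor obtained from KVM_CREATE_VM:
 *
 *	struct kvm_ppc_cpu_char cc;
 *
 *	if (ioctl(vm_fd, KVM_PPC_GET_CPU_CHAR, &cc) == 0)
 *		printf("character 0x%llx behaviour 0x%llx\n",
 *		       (unsigned long long)cc.character,
 *		       (unsigned long long)cc.behaviour);
 */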
23833214d01fSPaul Mackerras 
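/*
 * Top-level dispatcher for powerpc-specific VM ioctls.  Anything not handled
 * here falls through to the backend's arch_vm_ioctl hook on Book3S 64, or
 * fails with -ENOTTY otherwise.
 */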
2384d8708b80SThomas Huth int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
2385bbf45ba5SHollis Blanchard {
23865df554adSScott Wood 	struct kvm *kvm __maybe_unused = filp->private_data;
238715711e9cSAlexander Graf 	void __user *argp = (void __user *)arg;
2388d8708b80SThomas Huth 	int r;
2389bbf45ba5SHollis Blanchard 
2390bbf45ba5SHollis Blanchard 	switch (ioctl) {
239115711e9cSAlexander Graf 	case KVM_PPC_GET_PVINFO: {
239215711e9cSAlexander Graf 		struct kvm_ppc_pvinfo pvinfo;
2393d8cdddcdSVasiliy Kulikov 		memset(&pvinfo, 0, sizeof(pvinfo));
239415711e9cSAlexander Graf 		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
239515711e9cSAlexander Graf 		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
239615711e9cSAlexander Graf 			r = -EFAULT;
239715711e9cSAlexander Graf 			goto out;
239815711e9cSAlexander Graf 		}
239915711e9cSAlexander Graf 
240015711e9cSAlexander Graf 		break;
240115711e9cSAlexander Graf 	}
240276d837a4SPaul Mackerras #ifdef CONFIG_SPAPR_TCE_IOMMU
240358ded420SAlexey Kardashevskiy 	case KVM_CREATE_SPAPR_TCE_64: {
240458ded420SAlexey Kardashevskiy 		struct kvm_create_spapr_tce_64 create_tce_64;
240558ded420SAlexey Kardashevskiy 
240658ded420SAlexey Kardashevskiy 		r = -EFAULT;
240758ded420SAlexey Kardashevskiy 		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
240858ded420SAlexey Kardashevskiy 			goto out;
240958ded420SAlexey Kardashevskiy 		if (create_tce_64.flags) {
241058ded420SAlexey Kardashevskiy 			r = -EINVAL;
241158ded420SAlexey Kardashevskiy 			goto out;
241258ded420SAlexey Kardashevskiy 		}
241358ded420SAlexey Kardashevskiy 		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
241458ded420SAlexey Kardashevskiy 		goto out;
241558ded420SAlexey Kardashevskiy 	}
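	/*
	 * Legacy 32-bit TCE window: convert the request to the 64-bit
	 * variant with a zero offset and 4K IOMMU pages before creating
	 * the table.
	 */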
241654738c09SDavid Gibson 	case KVM_CREATE_SPAPR_TCE: {
241754738c09SDavid Gibson 		struct kvm_create_spapr_tce create_tce;
241858ded420SAlexey Kardashevskiy 		struct kvm_create_spapr_tce_64 create_tce_64;
241954738c09SDavid Gibson 
242054738c09SDavid Gibson 		r = -EFAULT;
242154738c09SDavid Gibson 		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
242254738c09SDavid Gibson 			goto out;
242358ded420SAlexey Kardashevskiy 
242458ded420SAlexey Kardashevskiy 		create_tce_64.liobn = create_tce.liobn;
242558ded420SAlexey Kardashevskiy 		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
242658ded420SAlexey Kardashevskiy 		create_tce_64.offset = 0;
242758ded420SAlexey Kardashevskiy 		create_tce_64.size = create_tce.window_size >>
242858ded420SAlexey Kardashevskiy 				IOMMU_PAGE_SHIFT_4K;
242958ded420SAlexey Kardashevskiy 		create_tce_64.flags = 0;
243058ded420SAlexey Kardashevskiy 		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
243154738c09SDavid Gibson 		goto out;
243254738c09SDavid Gibson 	}
243376d837a4SPaul Mackerras #endif
243476d837a4SPaul Mackerras #ifdef CONFIG_PPC_BOOK3S_64
24355b74716eSBenjamin Herrenschmidt 	case KVM_PPC_GET_SMMU_INFO: {
24365b74716eSBenjamin Herrenschmidt 		struct kvm_ppc_smmu_info info;
2437cbbc58d4SAneesh Kumar K.V 		struct kvm *kvm = filp->private_data;
24385b74716eSBenjamin Herrenschmidt 
24395b74716eSBenjamin Herrenschmidt 		memset(&info, 0, sizeof(info));
2440cbbc58d4SAneesh Kumar K.V 		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
24415b74716eSBenjamin Herrenschmidt 		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
24425b74716eSBenjamin Herrenschmidt 			r = -EFAULT;
24435b74716eSBenjamin Herrenschmidt 		break;
24445b74716eSBenjamin Herrenschmidt 	}
24458e591cb7SMichael Ellerman 	case KVM_PPC_RTAS_DEFINE_TOKEN: {
24468e591cb7SMichael Ellerman 		struct kvm *kvm = filp->private_data;
24478e591cb7SMichael Ellerman 
24488e591cb7SMichael Ellerman 		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
24498e591cb7SMichael Ellerman 		break;
24508e591cb7SMichael Ellerman 	}
2451c9270132SPaul Mackerras 	case KVM_PPC_CONFIGURE_V3_MMU: {
2452c9270132SPaul Mackerras 		struct kvm *kvm = filp->private_data;
2453c9270132SPaul Mackerras 		struct kvm_ppc_mmuv3_cfg cfg;
2454c9270132SPaul Mackerras 
2455c9270132SPaul Mackerras 		r = -EINVAL;
2456c9270132SPaul Mackerras 		if (!kvm->arch.kvm_ops->configure_mmu)
2457c9270132SPaul Mackerras 			goto out;
2458c9270132SPaul Mackerras 		r = -EFAULT;
2459c9270132SPaul Mackerras 		if (copy_from_user(&cfg, argp, sizeof(cfg)))
2460c9270132SPaul Mackerras 			goto out;
2461c9270132SPaul Mackerras 		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
2462c9270132SPaul Mackerras 		break;
2463c9270132SPaul Mackerras 	}
2464c9270132SPaul Mackerras 	case KVM_PPC_GET_RMMU_INFO: {
2465c9270132SPaul Mackerras 		struct kvm *kvm = filp->private_data;
2466c9270132SPaul Mackerras 		struct kvm_ppc_rmmu_info info;
2467c9270132SPaul Mackerras 
2468c9270132SPaul Mackerras 		r = -EINVAL;
2469c9270132SPaul Mackerras 		if (!kvm->arch.kvm_ops->get_rmmu_info)
2470c9270132SPaul Mackerras 			goto out;
2471c9270132SPaul Mackerras 		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
2472c9270132SPaul Mackerras 		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2473c9270132SPaul Mackerras 			r = -EFAULT;
2474c9270132SPaul Mackerras 		break;
2475c9270132SPaul Mackerras 	}
24763214d01fSPaul Mackerras 	case KVM_PPC_GET_CPU_CHAR: {
24773214d01fSPaul Mackerras 		struct kvm_ppc_cpu_char cpuchar;
24783214d01fSPaul Mackerras 
24793214d01fSPaul Mackerras 		r = kvmppc_get_cpu_char(&cpuchar);
24803214d01fSPaul Mackerras 		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
24813214d01fSPaul Mackerras 			r = -EFAULT;
24823214d01fSPaul Mackerras 		break;
24833214d01fSPaul Mackerras 	}
248422945688SBharata B Rao 	case KVM_PPC_SVM_OFF: {
248522945688SBharata B Rao 		struct kvm *kvm = filp->private_data;
248622945688SBharata B Rao 
248722945688SBharata B Rao 		r = 0;
248822945688SBharata B Rao 		if (!kvm->arch.kvm_ops->svm_off)
248922945688SBharata B Rao 			goto out;
249022945688SBharata B Rao 
249122945688SBharata B Rao 		r = kvm->arch.kvm_ops->svm_off(kvm);
249222945688SBharata B Rao 		break;
249322945688SBharata B Rao 	}
2494cbbc58d4SAneesh Kumar K.V 	default: {
2495cbbc58d4SAneesh Kumar K.V 		struct kvm *kvm = filp->private_data;
2496cbbc58d4SAneesh Kumar K.V 		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
2497cbbc58d4SAneesh Kumar K.V 	}
24983a167beaSAneesh Kumar K.V #else /* CONFIG_PPC_BOOK3S_64 */
2499bbf45ba5SHollis Blanchard 	default:
2500367e1319SAvi Kivity 		r = -ENOTTY;
25013a167beaSAneesh Kumar K.V #endif
2502bbf45ba5SHollis Blanchard 	}
250315711e9cSAlexander Graf out:
2504bbf45ba5SHollis Blanchard 	return r;
2505bbf45ba5SHollis Blanchard }
2506bbf45ba5SHollis Blanchard 
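/*
 * LPID (logical partition ID) allocator, backed by an IDA.  LPID 0 is
 * reserved for the host; guests are given IDs in [1, nr_lpids - 1].
 */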
25076ba2a292SNicholas Piggin static DEFINE_IDA(lpid_inuse);
2508043cc4d7SScott Wood static unsigned long nr_lpids;
2509043cc4d7SScott Wood 
2510043cc4d7SScott Wood long kvmppc_alloc_lpid(void)
2511043cc4d7SScott Wood {
25126ba2a292SNicholas Piggin 	int lpid;
2513043cc4d7SScott Wood 
25146ba2a292SNicholas Piggin 	/* The host LPID must always be 0 (allocation starts at 1) */
25156ba2a292SNicholas Piggin 	lpid = ida_alloc_range(&lpid_inuse, 1, nr_lpids - 1, GFP_KERNEL);
25166ba2a292SNicholas Piggin 	if (lpid < 0) {
25176ba2a292SNicholas Piggin 		if (lpid == -ENOMEM)
25186ba2a292SNicholas Piggin 			pr_err("%s: Out of memory\n", __func__);
25196ba2a292SNicholas Piggin 		else
2520043cc4d7SScott Wood 			pr_err("%s: No LPIDs free\n", __func__);
2521043cc4d7SScott Wood 		return -ENOMEM;
2522043cc4d7SScott Wood 	}
2523043cc4d7SScott Wood 
2524043cc4d7SScott Wood 	return lpid;
2525043cc4d7SScott Wood }
25262ba9f0d8SAneesh Kumar K.V EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
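/*
 * Minimal sketch (for illustration only) of how a Book3S HV-style backend
 * uses the allocator when a VM is created; the negative return value is
 * propagated to the caller on failure:
 *
 *	long lpid = kvmppc_alloc_lpid();
 *
 *	if (lpid < 0)
 *		return lpid;
 *	kvm->arch.lpid = lpid;
 */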
2527043cc4d7SScott Wood 
2528043cc4d7SScott Wood void kvmppc_free_lpid(long lpid)
2529043cc4d7SScott Wood {
25306ba2a292SNicholas Piggin 	ida_free(&lpid_inuse, lpid);
2531043cc4d7SScott Wood }
25322ba9f0d8SAneesh Kumar K.V EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
2533043cc4d7SScott Wood 
25346ba2a292SNicholas Piggin /* nr_lpids_param includes the host LPID */
2535043cc4d7SScott Wood void kvmppc_init_lpid(unsigned long nr_lpids_param)
2536043cc4d7SScott Wood {
25376ba2a292SNicholas Piggin 	nr_lpids = nr_lpids_param;
2538043cc4d7SScott Wood }
25392ba9f0d8SAneesh Kumar K.V EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
2540043cc4d7SScott Wood 
2541478d6686SPaolo Bonzini EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);
2542faf01aefSAlexey Kardashevskiy 
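/*
 * Debugfs hooks: forward to the backend (HV or PR) if it provides per-vcpu
 * or per-VM debugfs entries, otherwise do nothing.
 */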
2543faf01aefSAlexey Kardashevskiy void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
2544faf01aefSAlexey Kardashevskiy {
2545faf01aefSAlexey Kardashevskiy 	if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs)
2546faf01aefSAlexey Kardashevskiy 		vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry);
2547faf01aefSAlexey Kardashevskiy }
2548faf01aefSAlexey Kardashevskiy 
2549faf01aefSAlexey Kardashevskiy int kvm_arch_create_vm_debugfs(struct kvm *kvm)
2550faf01aefSAlexey Kardashevskiy {
2551faf01aefSAlexey Kardashevskiy 	if (kvm->arch.kvm_ops->create_vm_debugfs)
2552faf01aefSAlexey Kardashevskiy 		kvm->arch.kvm_ops->create_vm_debugfs(kvm);
2553faf01aefSAlexey Kardashevskiy 	return 0;
2554faf01aefSAlexey Kardashevskiy }
2555