xref: /openbmc/linux/arch/powerpc/kvm/powerpc.c (revision acf17878)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <linux/of.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif
#include <asm/ultravisor.h>
#include <asm/setup.h>

#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);


int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_runnable(vcpu);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (kvm_request_pending(vcpu)) {
			/* Make sure we process requests preemptable */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
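/*
 * Byte-swap every field of the shared (magic) page.  Used when the guest
 * switches endianness so that the page keeps the layout the guest expects.
 */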
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

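/*
 * Handle a KVM paravirtual hypercall from the guest.  The hcall number is
 * taken from GPR 11 and up to four arguments from GPRs 3-6 (truncated to
 * 32 bits when the guest runs in 32-bit mode); the second return value is
 * passed back in GPR 4.
 */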
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void*)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_halt(vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		ppc_inst_t last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		kvm_debug_ratelimited("Guest access to device memory using unsupported instruction (opcode: %#08x)\n",
				      ppc_inst_val(last_inst));

		/*
		 * Injecting a Data Storage here is a bit more
		 * accurate since the instruction that caused the
		 * access could still be a valid one.
		 */
		if (!IS_ENABLED(CONFIG_BOOKE)) {
			ulong dsisr = DSISR_BADACCESS;

			if (vcpu->mmio_is_write)
				dsisr |= DSISR_ISSTORE;

			kvmppc_core_queue_data_storage(vcpu,
					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
					vcpu->arch.vaddr_accessed, dsisr);
		} else {
			/*
			 * BookE does not send a SIGBUS on a bad
			 * fault, so use a Program interrupt instead
			 * to avoid a fault loop.
			 */
			kvmppc_core_queue_program(vcpu, 0);
		}

		r = RESUME_GUEST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

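/*
 * Store 'size' bytes from 'ptr' at guest effective address *eaddr.  Tries
 * the kvm_ops store_to_eaddr hook first, then translates the address and
 * writes to guest memory, handling stores to the magic page in place.
 */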
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r = -EINVAL;

	vcpu->stat.st++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
							    size);

	if ((!r) || (r == -EAGAIN))
		return r;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

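/*
 * Load 'size' bytes into 'ptr' from guest effective address *eaddr,
 * checking read (and, for instruction fetches, execute) permission and
 * handling loads from the magic page in place.
 */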
int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc = -EINVAL;

	vcpu->stat.ld++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
							      size);

	if ((!rc) || (rc == -EAGAIN))
		return rc;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	kvm_vcpu_srcu_read_lock(vcpu);
	rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
	kvm_vcpu_srcu_read_unlock(vcpu);
	if (rc)
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

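/*
 * Pick the HV or PR implementation for a new VM (HV is preferred when both
 * modules are loaded and no type was requested) and pin the backing module.
 */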
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	int r;

	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else	if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (!try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	r = kvmppc_core_init_vm(kvm);
	if (r)
		module_put(kvm_ops->owner);
	return r;
err_out:
	return -EINVAL;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_destroy_vcpus(kvm);

	mutex_lock(&kvm->lock);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

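/*
 * Report whether a KVM capability is supported.  Many answers depend on
 * whether the (selected or guessed) implementation is HV or PR KVM.
 */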
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_SET_GUEST_DEBUG:
		r = 1;
		break;
	case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
		r = 1;
		break;
	case KVM_CAP_SPAPR_TCE_VFIO:
		r = !!cpu_has_feature(CPU_FTR_HVMODE);
		break;
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
		r = 1;
		break;
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE:
		/*
		 * We need XIVE to be enabled on the platform (implies
		 * a POWER9 processor) and the PowerNV platform, as
		 * nested is not yet supported.
		 */
		r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
			kvmppc_xive_native_supported();
		break;
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
	case KVM_CAP_IRQFD_RESAMPLE:
		r = !xive_enabled();
		break;
#endif

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				/* P9 can emulate dbells, so allow any mode */
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible &&
		       kvmppc_hv_ops->hash_v3_possible());
		break;
	case KVM_CAP_PPC_NESTED_HV:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
		       !kvmppc_hv_ops->enable_nested(NULL));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = min_t(unsigned int, num_present_cpus(), KVM_MAX_VCPUS);
		else
			r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_IDS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_CAP_PPC_HTM:
		r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
		break;
#endif
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
	case KVM_CAP_PPC_SECURE_GUEST:
		r = hv_enabled && kvmppc_hv_ops->enable_svm &&
			!kvmppc_hv_ops->enable_svm(NULL);
		break;
	case KVM_CAP_PPC_DAWR1:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 &&
		       !kvmppc_hv_ops->enable_dawr1(NULL));
		break;
	case KVM_CAP_PPC_RPT_INVALIDATE:
		r = 1;
		break;
#endif
	case KVM_CAP_PPC_AIL_MODE_3:
		r = 0;
		/*
		 * KVM PR, POWER7, and some POWER9s don't support AIL=3 mode.
		 * The POWER9s can support it if the guest runs in hash mode,
		 * but QEMU doesn't necessarily query the capability in time.
		 */
		if (hv_enabled) {
			if (kvmhv_on_pseries()) {
				if (pseries_reloc_on_exception())
					r = 1;
			} else if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
				  !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
				r = 1;
			}
		}
		break;
	default:
		r = 0;
		break;
	}
	return r;

}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvmppc_core_free_memslot(kvm, slot);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, old, new, change);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, old, new, change);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

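/* hrtimer callback used to emulate the guest decrementer. */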
static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	err = kvmppc_subarch_vcpu_init(vcpu);
	if (err)
		return err;

	err = kvmppc_core_vcpu_create(vcpu);
	if (err)
		goto out_vcpu_uninit;

	rcuwait_init(&vcpu->arch.wait);
	vcpu->arch.waitp = &vcpu->arch.wait;
	return 0;

out_vcpu_uninit:
	kvmppc_subarch_vcpu_uninit(vcpu);
	return err;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xics_on_xive())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	case KVMPPC_IRQ_XIVE:
		kvmppc_xive_native_cleanup_vcpu(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);

	kvmppc_subarch_vcpu_uninit(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

#ifdef CONFIG_VSX
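/*
 * Helpers for completing MMIO loads into VSX registers: translate the
 * requested element index into the endian-dependent in-register offset and
 * write the loaded value into either the FPR (index < 32) or VR half of
 * the VSX register file.
 */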
static inline int kvmppc_get_vsr_dword_offset(int index)
{
	int offset;

	if ((index != 0) && (index != 1))
		return -1;

#ifdef __BIG_ENDIAN
	offset =  index;
#else
	offset = 1 - index;
#endif

	return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
	int offset;

	if ((index > 3) || (index < 0))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 3 - index;
#endif
	return offset;
}

static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[offset] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
	}
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[0] = gpr;
		val.vsxval[1] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
	}
}

static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
	u32 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		val.vsx32val[2] = gpr;
		val.vsx32val[3] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
		VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
	}
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	int dword_offset, word_offset;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsx32val[offset] = gpr32;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		dword_offset = offset / 2;
		word_offset = offset % 2;
		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
		val.vsx32val[word_offset] = gpr32;
		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
	}
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
kvmppc_get_vmx_offset_generic(struct kvm_vcpu * vcpu,int index,int element_size)1008acc9eb93SSimon Guo static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
1009acc9eb93SSimon Guo 		int index, int element_size)
1010acc9eb93SSimon Guo {
1011acc9eb93SSimon Guo 	int offset;
1012acc9eb93SSimon Guo 	int elts = sizeof(vector128)/element_size;
1013acc9eb93SSimon Guo 
1014acc9eb93SSimon Guo 	if ((index < 0) || (index >= elts))
1015acc9eb93SSimon Guo 		return -1;
1016acc9eb93SSimon Guo 
1017acc9eb93SSimon Guo 	if (kvmppc_need_byteswap(vcpu))
1018acc9eb93SSimon Guo 		offset = elts - index - 1;
1019acc9eb93SSimon Guo 	else
1020acc9eb93SSimon Guo 		offset = index;
1021acc9eb93SSimon Guo 
1022acc9eb93SSimon Guo 	return offset;
1023acc9eb93SSimon Guo }
1024acc9eb93SSimon Guo 
kvmppc_get_vmx_dword_offset(struct kvm_vcpu * vcpu,int index)1025acc9eb93SSimon Guo static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
1026acc9eb93SSimon Guo 		int index)
1027acc9eb93SSimon Guo {
1028acc9eb93SSimon Guo 	return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
1029acc9eb93SSimon Guo }
1030acc9eb93SSimon Guo 
kvmppc_get_vmx_word_offset(struct kvm_vcpu * vcpu,int index)1031acc9eb93SSimon Guo static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
1032acc9eb93SSimon Guo 		int index)
1033acc9eb93SSimon Guo {
1034acc9eb93SSimon Guo 	return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
1035acc9eb93SSimon Guo }
1036acc9eb93SSimon Guo 
kvmppc_get_vmx_hword_offset(struct kvm_vcpu * vcpu,int index)1037acc9eb93SSimon Guo static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
1038acc9eb93SSimon Guo 		int index)
1039acc9eb93SSimon Guo {
1040acc9eb93SSimon Guo 	return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
1041acc9eb93SSimon Guo }
1042acc9eb93SSimon Guo 
kvmppc_get_vmx_byte_offset(struct kvm_vcpu * vcpu,int index)1043acc9eb93SSimon Guo static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
104409f98496SJose Ricardo Ziviani 		int index)
104509f98496SJose Ricardo Ziviani {
104609f98496SJose Ricardo Ziviani 	return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
1047acc9eb93SSimon Guo }
1048acc9eb93SSimon Guo 
1049acc9eb93SSimon Guo 
kvmppc_set_vmx_dword(struct kvm_vcpu * vcpu,u64 gpr)105009f98496SJose Ricardo Ziviani static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
105109f98496SJose Ricardo Ziviani 	u64 gpr)
1052acc9eb93SSimon Guo {
105309f98496SJose Ricardo Ziviani 	union kvmppc_one_reg val;
105409f98496SJose Ricardo Ziviani 	int offset = kvmppc_get_vmx_dword_offset(vcpu,
1055acc9eb93SSimon Guo 			vcpu->arch.mmio_vmx_offset);
1056acc9eb93SSimon Guo 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1057acc9eb93SSimon Guo 
1058acc9eb93SSimon Guo 	if (offset == -1)
105909f98496SJose Ricardo Ziviani 		return;
1060acc9eb93SSimon Guo 
1061acc9eb93SSimon Guo 	val.vval = VCPU_VSX_VR(vcpu, index);
1062acc9eb93SSimon Guo 	val.vsxval[offset] = gpr;
1063acc9eb93SSimon Guo 	VCPU_VSX_VR(vcpu, index) = val.vval;
1064acc9eb93SSimon Guo }
1065acc9eb93SSimon Guo 
kvmppc_set_vmx_word(struct kvm_vcpu * vcpu,u32 gpr32)1066acc9eb93SSimon Guo static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
1067acc9eb93SSimon Guo 	u32 gpr32)
1068acc9eb93SSimon Guo {
1069acc9eb93SSimon Guo 	union kvmppc_one_reg val;
1070acc9eb93SSimon Guo 	int offset = kvmppc_get_vmx_word_offset(vcpu,
1071acc9eb93SSimon Guo 			vcpu->arch.mmio_vmx_offset);
1072acc9eb93SSimon Guo 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1073acc9eb93SSimon Guo 
1074acc9eb93SSimon Guo 	if (offset == -1)
1075acc9eb93SSimon Guo 		return;
1076acc9eb93SSimon Guo 
1077acc9eb93SSimon Guo 	val.vval = VCPU_VSX_VR(vcpu, index);
1078acc9eb93SSimon Guo 	val.vsx32val[offset] = gpr32;
1079acc9eb93SSimon Guo 	VCPU_VSX_VR(vcpu, index) = val.vval;
1080acc9eb93SSimon Guo }
1081acc9eb93SSimon Guo 
kvmppc_set_vmx_hword(struct kvm_vcpu * vcpu,u16 gpr16)1082acc9eb93SSimon Guo static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
1083acc9eb93SSimon Guo 	u16 gpr16)
1084acc9eb93SSimon Guo {
1085acc9eb93SSimon Guo 	union kvmppc_one_reg val;
1086acc9eb93SSimon Guo 	int offset = kvmppc_get_vmx_hword_offset(vcpu,
1087acc9eb93SSimon Guo 			vcpu->arch.mmio_vmx_offset);
1088acc9eb93SSimon Guo 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1089acc9eb93SSimon Guo 
1090acc9eb93SSimon Guo 	if (offset == -1)
1091acc9eb93SSimon Guo 		return;
1092acc9eb93SSimon Guo 
1093acc9eb93SSimon Guo 	val.vval = VCPU_VSX_VR(vcpu, index);
1094acc9eb93SSimon Guo 	val.vsx16val[offset] = gpr16;
1095acc9eb93SSimon Guo 	VCPU_VSX_VR(vcpu, index) = val.vval;
1096acc9eb93SSimon Guo }
1097acc9eb93SSimon Guo 
kvmppc_set_vmx_byte(struct kvm_vcpu * vcpu,u8 gpr8)1098acc9eb93SSimon Guo static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
1099acc9eb93SSimon Guo 	u8 gpr8)
1100acc9eb93SSimon Guo {
1101acc9eb93SSimon Guo 	union kvmppc_one_reg val;
1102acc9eb93SSimon Guo 	int offset = kvmppc_get_vmx_byte_offset(vcpu,
1103acc9eb93SSimon Guo 			vcpu->arch.mmio_vmx_offset);
1104acc9eb93SSimon Guo 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1105acc9eb93SSimon Guo 
110609f98496SJose Ricardo Ziviani 	if (offset == -1)
110709f98496SJose Ricardo Ziviani 		return;
110809f98496SJose Ricardo Ziviani 
11096f63e81bSBin Lu 	val.vval = VCPU_VSX_VR(vcpu, index);
11106f63e81bSBin Lu 	val.vsx8val[offset] = gpr8;
11116f63e81bSBin Lu 	VCPU_VSX_VR(vcpu, index) = val.vval;
11126f63e81bSBin Lu }
11136f63e81bSBin Lu #endif /* CONFIG_ALTIVEC */
11146f63e81bSBin Lu 
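/*
 * FP single <-> double conversion helpers for MMIO emulation.  They run a
 * two-instruction FPU sequence (load in one precision, store in the other)
 * with preemption disabled and kernel FP enabled, clobbering fr0.  Without
 * CONFIG_PPC_FPU they degenerate to the identity macros further down.
 */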
11156f63e81bSBin Lu #ifdef CONFIG_PPC_FPU
sp_to_dp(u32 fprs)11162a24d80fSNick Desaulniers static inline u64 sp_to_dp(u32 fprs)
11176f63e81bSBin Lu {
11186f63e81bSBin Lu 	u64 fprd;
11196f63e81bSBin Lu 
11206f63e81bSBin Lu 	preempt_disable();
11216f63e81bSBin Lu 	enable_kernel_fp();
11226f63e81bSBin Lu 	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m<>" (fprd) : "m<>" (fprs)
11236f63e81bSBin Lu 	     : "fr0");
11246f63e81bSBin Lu 	preempt_enable();
11256f63e81bSBin Lu 	return fprd;
11266f63e81bSBin Lu }
11276f63e81bSBin Lu 
dp_to_sp(u64 fprd)11282a24d80fSNick Desaulniers static inline u32 dp_to_sp(u64 fprd)
11296f63e81bSBin Lu {
11306f63e81bSBin Lu 	u32 fprs;
11316f63e81bSBin Lu 
11326f63e81bSBin Lu 	preempt_disable();
11336f63e81bSBin Lu 	enable_kernel_fp();
11346f63e81bSBin Lu 	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m<>" (fprs) : "m<>" (fprd)
11356f63e81bSBin Lu 	     : "fr0");
11366f63e81bSBin Lu 	preempt_enable();
11376f63e81bSBin Lu 	return fprs;
11386f63e81bSBin Lu }
11398c99d345STianjia Zhang 
1140bbf45ba5SHollis Blanchard #else
11418c99d345STianjia Zhang #define sp_to_dp(x)	(x)
11423f649ab7SKees Cook #define dp_to_sp(x)	(x)
1143bbf45ba5SHollis Blanchard #endif /* CONFIG_PPC_FPU */
11443f831504SFabiano Rosas 
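/*
 * Complete an emulated MMIO load: pull the value out of run->mmio.data
 * (byte-swapping it when mmio_host_swabbed is set), apply the optional
 * single->double FP extension and sign extension, then route it to the
 * destination GPR, FPR, QPR, VSX, VMX or nested-guest register selected
 * by vcpu->arch.io_gpr.
 */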
kvmppc_complete_mmio_load(struct kvm_vcpu * vcpu)1145bbf45ba5SHollis Blanchard static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
1146bbf45ba5SHollis Blanchard {
1147d078eed3SDavid Gibson 	struct kvm_run *run = vcpu->run;
1148bbf45ba5SHollis Blanchard 	u64 gpr;
1149b104d066SAlexander Graf 
11508e5b26b5SAlexander Graf 	if (run->mmio.len > sizeof(gpr))
11518e5b26b5SAlexander Graf 		return;
11528e5b26b5SAlexander Graf 
1153bbf45ba5SHollis Blanchard 	if (!vcpu->arch.mmio_host_swabbed) {
1154bbf45ba5SHollis Blanchard 		switch (run->mmio.len) {
1155bbf45ba5SHollis Blanchard 		case 8: gpr = *(u64 *)run->mmio.data; break;
1156d078eed3SDavid Gibson 		case 4: gpr = *(u32 *)run->mmio.data; break;
1157d078eed3SDavid Gibson 		case 2: gpr = *(u16 *)run->mmio.data; break;
1158d078eed3SDavid Gibson 		case 1: gpr = *(u8 *)run->mmio.data; break;
11598e5b26b5SAlexander Graf 		}
1160bbf45ba5SHollis Blanchard 	} else {
1161bbf45ba5SHollis Blanchard 		switch (run->mmio.len) {
11628e5b26b5SAlexander Graf 		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
11636f63e81bSBin Lu 		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
11646f63e81bSBin Lu 		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
11656f63e81bSBin Lu 		case 1: gpr = *(u8 *)run->mmio.data; break;
11666f63e81bSBin Lu 		}
11673587d534SAlexander Graf 	}
11683587d534SAlexander Graf 
11693587d534SAlexander Graf 	/* conversion between single and double precision */
11703587d534SAlexander Graf 	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
11713587d534SAlexander Graf 		gpr = sp_to_dp(gpr);
11723587d534SAlexander Graf 
11733587d534SAlexander Graf 	if (vcpu->arch.mmio_sign_extend) {
11743587d534SAlexander Graf 		switch (run->mmio.len) {
11753587d534SAlexander Graf #ifdef CONFIG_PPC64
11763587d534SAlexander Graf 		case 4:
11773587d534SAlexander Graf 			gpr = (s64)(s32)gpr;
11783587d534SAlexander Graf 			break;
11793587d534SAlexander Graf #endif
11803587d534SAlexander Graf 		case 2:
11813587d534SAlexander Graf 			gpr = (s64)(s16)gpr;
11823587d534SAlexander Graf 			break;
1183b3c5d3c2SAlexander Graf 		case 1:
1184b3c5d3c2SAlexander Graf 			gpr = (s64)(s8)gpr;
1185b104d066SAlexander Graf 			break;
1186b104d066SAlexander Graf 		}
1187b3c5d3c2SAlexander Graf 	}
11882e6baa46SSimon Guo 
11892e6baa46SSimon Guo 	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
11902e6baa46SSimon Guo 	case KVM_MMIO_REG_GPR:
1191efff1912SPaul Mackerras 		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
1192b104d066SAlexander Graf 		break;
1193287d5611SAlexander Graf 	case KVM_MMIO_REG_FPR:
1194b3c5d3c2SAlexander Graf 		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1195b3c5d3c2SAlexander Graf 			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
1196b104d066SAlexander Graf 
1197b3c5d3c2SAlexander Graf 		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1198efff1912SPaul Mackerras 		break;
1199b3c5d3c2SAlexander Graf #ifdef CONFIG_PPC_BOOK3S
1200b104d066SAlexander Graf 	case KVM_MMIO_REG_QPR:
1201287d5611SAlexander Graf 		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
12026f63e81bSBin Lu 		break;
12036f63e81bSBin Lu 	case KVM_MMIO_REG_FQPR:
12042e6baa46SSimon Guo 		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
12052e6baa46SSimon Guo 		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
12062e6baa46SSimon Guo 		break;
1207da2a32b8SSimon Guo #endif
12086f63e81bSBin Lu #ifdef CONFIG_VSX
1209da2a32b8SSimon Guo 	case KVM_MMIO_REG_VSX:
12106f63e81bSBin Lu 		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1211da2a32b8SSimon Guo 			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);
12126f63e81bSBin Lu 
12136f63e81bSBin Lu 		if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
1214da2a32b8SSimon Guo 			kvmppc_set_vsr_dword(vcpu, gpr);
121594dd7fa1SSimon Guo 		else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
121694dd7fa1SSimon Guo 			kvmppc_set_vsr_word(vcpu, gpr);
12176f63e81bSBin Lu 		else if (vcpu->arch.mmio_copy_type ==
12186f63e81bSBin Lu 				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
121909f98496SJose Ricardo Ziviani 			kvmppc_set_vsr_dword_dump(vcpu, gpr);
122009f98496SJose Ricardo Ziviani 		else if (vcpu->arch.mmio_copy_type ==
12212e6baa46SSimon Guo 				KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
12222e6baa46SSimon Guo 			kvmppc_set_vsr_word_dump(vcpu, gpr);
12232e6baa46SSimon Guo 		break;
1224acc9eb93SSimon Guo #endif
122509f98496SJose Ricardo Ziviani #ifdef CONFIG_ALTIVEC
1226acc9eb93SSimon Guo 	case KVM_MMIO_REG_VMX:
1227acc9eb93SSimon Guo 		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1228acc9eb93SSimon Guo 			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);
1229acc9eb93SSimon Guo 
1230acc9eb93SSimon Guo 		if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
1231acc9eb93SSimon Guo 			kvmppc_set_vmx_dword(vcpu, gpr);
1232acc9eb93SSimon Guo 		else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
1233acc9eb93SSimon Guo 			kvmppc_set_vmx_word(vcpu, gpr);
123409f98496SJose Ricardo Ziviani 		else if (vcpu->arch.mmio_copy_type ==
123509f98496SJose Ricardo Ziviani 				KVMPPC_VMX_COPY_HWORD)
1236873db2cdSSuraj Jitindar Singh 			kvmppc_set_vmx_hword(vcpu, gpr);
1237873db2cdSSuraj Jitindar Singh 		else if (vcpu->arch.mmio_copy_type ==
1238873db2cdSSuraj Jitindar Singh 				KVMPPC_VMX_COPY_BYTE)
1239873db2cdSSuraj Jitindar Singh 			kvmppc_set_vmx_byte(vcpu, gpr);
1240873db2cdSSuraj Jitindar Singh 		break;
1241873db2cdSSuraj Jitindar Singh #endif
1242873db2cdSSuraj Jitindar Singh #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1243873db2cdSSuraj Jitindar Singh 	case KVM_MMIO_REG_NESTED_GPR:
1244b104d066SAlexander Graf 		if (kvmppc_need_byteswap(vcpu))
1245b104d066SAlexander Graf 			gpr = swab64(gpr);
1246b104d066SAlexander Graf 		kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
1247bbf45ba5SHollis Blanchard 				     sizeof(gpr));
1248bbf45ba5SHollis Blanchard 		break;
12498c99d345STianjia Zhang #endif
125073601775SCédric Le Goater 	default:
1251eb8b0560SPaul Mackerras 		BUG();
1252bbf45ba5SHollis Blanchard 	}
12538c99d345STianjia Zhang }
1254ed840ee9SScott Wood 
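/*
 * Common MMIO load path: describe the access in vcpu->run->mmio, record
 * the destination register and sign-extension mode for later completion,
 * and try the in-kernel KVM_MMIO_BUS first.  Returns EMULATE_DONE if an
 * in-kernel device handled it (the load completes immediately), otherwise
 * EMULATE_DO_MMIO so the exit is forwarded to userspace.
 */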
__kvmppc_handle_load(struct kvm_vcpu * vcpu,unsigned int rt,unsigned int bytes,int is_default_endian,int sign_extend)1255d078eed3SDavid Gibson static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
125673601775SCédric Le Goater 				unsigned int rt, unsigned int bytes,
1257d078eed3SDavid Gibson 				int is_default_endian, int sign_extend)
125873601775SCédric Le Goater {
1259d078eed3SDavid Gibson 	struct kvm_run *run = vcpu->run;
126073601775SCédric Le Goater 	int idx, ret;
1261d078eed3SDavid Gibson 	bool host_swabbed;
126273601775SCédric Le Goater 
1263ed840ee9SScott Wood 	/* Pity C doesn't have a logical XOR operator */
12643f831504SFabiano Rosas 	if (kvmppc_need_byteswap(vcpu)) {
12653f831504SFabiano Rosas 		host_swabbed = is_default_endian;
1266bbf45ba5SHollis Blanchard 	} else {
1267bbf45ba5SHollis Blanchard 		host_swabbed = !is_default_endian;
1268bbf45ba5SHollis Blanchard 	}
1269bbf45ba5SHollis Blanchard 
1270bbf45ba5SHollis Blanchard 	if (bytes > sizeof(run->mmio.data))
1271bbf45ba5SHollis Blanchard 		return EMULATE_FAIL;
1272d078eed3SDavid Gibson 
1273bbf45ba5SHollis Blanchard 	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1274bbf45ba5SHollis Blanchard 	run->mmio.len = bytes;
1275eb8b0560SPaul Mackerras 	run->mmio.is_write = 0;
1276bbf45ba5SHollis Blanchard 
1277ed840ee9SScott Wood 	vcpu->arch.io_gpr = rt;
1278ed840ee9SScott Wood 	vcpu->arch.mmio_host_swabbed = host_swabbed;
1279e32edf4fSNikolay Nikolaev 	vcpu->mmio_needed = 1;
1280ed840ee9SScott Wood 	vcpu->mmio_is_write = 0;
1281ed840ee9SScott Wood 	vcpu->arch.mmio_sign_extend = sign_extend;
1282ed840ee9SScott Wood 
1283ed840ee9SScott Wood 	idx = srcu_read_lock(&vcpu->kvm->srcu);
1284ed840ee9SScott Wood 
12858c99d345STianjia Zhang 	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
12860e673fb6SAlexander Graf 			      bytes, &run->mmio.data);
12870e673fb6SAlexander Graf 
12880e673fb6SAlexander Graf 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
12890e673fb6SAlexander Graf 
1290bbf45ba5SHollis Blanchard 	if (!ret) {
1291bbf45ba5SHollis Blanchard 		kvmppc_complete_mmio_load(vcpu);
1292eb8b0560SPaul Mackerras 		vcpu->mmio_needed = 0;
12938c99d345STianjia Zhang 		return EMULATE_DONE;
1294eb8b0560SPaul Mackerras 	}
1295eb8b0560SPaul Mackerras 
1296eb8b0560SPaul Mackerras 	return EMULATE_DO_MMIO;
12978c99d345STianjia Zhang }
1298eb8b0560SPaul Mackerras 
kvmppc_handle_load(struct kvm_vcpu * vcpu,unsigned int rt,unsigned int bytes,int is_default_endian)12992ba9f0d8SAneesh Kumar K.V int kvmppc_handle_load(struct kvm_vcpu *vcpu,
1300bbf45ba5SHollis Blanchard 		       unsigned int rt, unsigned int bytes,
13013587d534SAlexander Graf 		       int is_default_endian)
13028c99d345STianjia Zhang {
130373601775SCédric Le Goater 	return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
130473601775SCédric Le Goater }
13053587d534SAlexander Graf EXPORT_SYMBOL_GPL(kvmppc_handle_load);
13068c99d345STianjia Zhang 
13073587d534SAlexander Graf /* Same as above, but sign extends */
kvmppc_handle_loads(struct kvm_vcpu * vcpu,unsigned int rt,unsigned int bytes,int is_default_endian)13083587d534SAlexander Graf int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
13096f63e81bSBin Lu 			unsigned int rt, unsigned int bytes,
13108c99d345STianjia Zhang 			int is_default_endian)
13116f63e81bSBin Lu {
13126f63e81bSBin Lu 	return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
13136f63e81bSBin Lu }
13146f63e81bSBin Lu 
13156f63e81bSBin Lu #ifdef CONFIG_VSX
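/*
 * VSX loads may span several MMIO accesses: repeat the scalar load path
 * mmio_vsx_copy_nums times (capped at 4), advancing paddr_accessed and
 * mmio_vsx_offset after each successfully emulated element.
 */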
kvmppc_handle_vsx_load(struct kvm_vcpu * vcpu,unsigned int rt,unsigned int bytes,int is_default_endian,int mmio_sign_extend)13169aa6825bSPaul Mackerras int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
13179aa6825bSPaul Mackerras 			unsigned int rt, unsigned int bytes,
13186f63e81bSBin Lu 			int is_default_endian, int mmio_sign_extend)
13196f63e81bSBin Lu {
13206f63e81bSBin Lu 	enum emulation_result emulated = EMULATE_DONE;
13218c99d345STianjia Zhang 
13226f63e81bSBin Lu 	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
13236f63e81bSBin Lu 	if (vcpu->arch.mmio_vsx_copy_nums > 4)
13246f63e81bSBin Lu 		return EMULATE_FAIL;
13256f63e81bSBin Lu 
13266f63e81bSBin Lu 	while (vcpu->arch.mmio_vsx_copy_nums) {
13278c99d345STianjia Zhang 		emulated = __kvmppc_handle_load(vcpu, rt, bytes,
13286f63e81bSBin Lu 			is_default_endian, mmio_sign_extend);
13296f63e81bSBin Lu 
13306f63e81bSBin Lu 		if (emulated != EMULATE_DONE)
13316f63e81bSBin Lu 			break;
13326f63e81bSBin Lu 
13336f63e81bSBin Lu 		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
13346f63e81bSBin Lu 
13356f63e81bSBin Lu 		vcpu->arch.mmio_vsx_copy_nums--;
13368c99d345STianjia Zhang 		vcpu->arch.mmio_vsx_offset++;
133773601775SCédric Le Goater 	}
1338bbf45ba5SHollis Blanchard 	return emulated;
13398c99d345STianjia Zhang }
1340bbf45ba5SHollis Blanchard #endif /* CONFIG_VSX */
1341ed840ee9SScott Wood 
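/*
 * Mirror image of the load path: convert and byte-swap the value into
 * run->mmio.data, mark the access as a write, and try the in-kernel
 * KVM_MMIO_BUS before falling back to a userspace MMIO exit.
 */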
kvmppc_handle_store(struct kvm_vcpu * vcpu,u64 val,unsigned int bytes,int is_default_endian)1342d078eed3SDavid Gibson int kvmppc_handle_store(struct kvm_vcpu *vcpu,
134373601775SCédric Le Goater 			u64 val, unsigned int bytes, int is_default_endian)
1344d078eed3SDavid Gibson {
134573601775SCédric Le Goater 	struct kvm_run *run = vcpu->run;
1346d078eed3SDavid Gibson 	void *data = run->mmio.data;
134773601775SCédric Le Goater 	int idx, ret;
1348d078eed3SDavid Gibson 	bool host_swabbed;
134973601775SCédric Le Goater 
1350bbf45ba5SHollis Blanchard 	/* Pity C doesn't have a logical XOR operator */
13513f831504SFabiano Rosas 	if (kvmppc_need_byteswap(vcpu)) {
13523f831504SFabiano Rosas 		host_swabbed = is_default_endian;
1353bbf45ba5SHollis Blanchard 	} else {
1354bbf45ba5SHollis Blanchard 		host_swabbed = !is_default_endian;
1355bbf45ba5SHollis Blanchard 	}
1356bbf45ba5SHollis Blanchard 
1357bbf45ba5SHollis Blanchard 	if (bytes > sizeof(run->mmio.data))
1358bbf45ba5SHollis Blanchard 		return EMULATE_FAIL;
1359bbf45ba5SHollis Blanchard 
13606f63e81bSBin Lu 	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
13616f63e81bSBin Lu 	run->mmio.len = bytes;
13626f63e81bSBin Lu 	run->mmio.is_write = 1;
1363bbf45ba5SHollis Blanchard 	vcpu->mmio_needed = 1;
1364d078eed3SDavid Gibson 	vcpu->mmio_is_write = 1;
1365bbf45ba5SHollis Blanchard 
1366b104d066SAlexander Graf 	if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
1367bbf45ba5SHollis Blanchard 		val = dp_to_sp(val);
1368bbf45ba5SHollis Blanchard 
1369bbf45ba5SHollis Blanchard 	/* Store the value at the lowest bytes in 'data'. */
1370bbf45ba5SHollis Blanchard 	if (!host_swabbed) {
1371bbf45ba5SHollis Blanchard 		switch (bytes) {
1372bbf45ba5SHollis Blanchard 		case 8: *(u64 *)data = val; break;
1373d078eed3SDavid Gibson 		case 4: *(u32 *)data = val; break;
1374d078eed3SDavid Gibson 		case 2: *(u16 *)data = val; break;
1375d078eed3SDavid Gibson 		case 1: *(u8  *)data = val; break;
1376bbf45ba5SHollis Blanchard 		}
1377bbf45ba5SHollis Blanchard 	} else {
1378bbf45ba5SHollis Blanchard 		switch (bytes) {
1379bbf45ba5SHollis Blanchard 		case 8: *(u64 *)data = swab64(val); break;
1380ed840ee9SScott Wood 		case 4: *(u32 *)data = swab32(val); break;
1381ed840ee9SScott Wood 		case 2: *(u16 *)data = swab16(val); break;
1382e32edf4fSNikolay Nikolaev 		case 1: *(u8  *)data = val; break;
1383ed840ee9SScott Wood 		}
1384ed840ee9SScott Wood 	}
1385ed840ee9SScott Wood 
1386ed840ee9SScott Wood 	idx = srcu_read_lock(&vcpu->kvm->srcu);
1387ed840ee9SScott Wood 
13880e673fb6SAlexander Graf 	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
13890e673fb6SAlexander Graf 			       bytes, &run->mmio.data);
13900e673fb6SAlexander Graf 
13910e673fb6SAlexander Graf 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
1392bbf45ba5SHollis Blanchard 
1393bbf45ba5SHollis Blanchard 	if (!ret) {
13942ba9f0d8SAneesh Kumar K.V 		vcpu->mmio_needed = 0;
1395bbf45ba5SHollis Blanchard 		return EMULATE_DONE;
13966f63e81bSBin Lu 	}
13976f63e81bSBin Lu 
13986f63e81bSBin Lu 	return EMULATE_DO_MMIO;
13996f63e81bSBin Lu }
14006f63e81bSBin Lu EXPORT_SYMBOL_GPL(kvmppc_handle_store);
14016f63e81bSBin Lu 
1402da2a32b8SSimon Guo #ifdef CONFIG_VSX
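/*
 * Fetch the source element for a VSX store.  The element (doubleword or
 * word, selected by mmio_copy_type and mmio_vsx_offset) comes from the
 * FPRs for VSR 0-31 and from the Altivec VRs for VSR 32-63.  Returns -1
 * for an invalid offset or copy type.
 */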
kvmppc_get_vsr_data(struct kvm_vcpu * vcpu,int rs,u64 * val)14036f63e81bSBin Lu static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
14046f63e81bSBin Lu {
14056f63e81bSBin Lu 	u32 dword_offset, word_offset;
14066f63e81bSBin Lu 	union kvmppc_one_reg reg;
14076f63e81bSBin Lu 	int vsx_offset = 0;
14086f63e81bSBin Lu 	int copy_type = vcpu->arch.mmio_copy_type;
14096f63e81bSBin Lu 	int result = 0;
14106f63e81bSBin Lu 
14116f63e81bSBin Lu 	switch (copy_type) {
14126f63e81bSBin Lu 	case KVMPPC_VSX_COPY_DWORD:
14136f63e81bSBin Lu 		vsx_offset =
14146f63e81bSBin Lu 			kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
14154eeb8556SSimon Guo 
14166f63e81bSBin Lu 		if (vsx_offset == -1) {
14176f63e81bSBin Lu 			result = -1;
14184eeb8556SSimon Guo 			break;
14196f63e81bSBin Lu 		}
14206f63e81bSBin Lu 
14216f63e81bSBin Lu 		if (rs < 32) {
14226f63e81bSBin Lu 			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
14236f63e81bSBin Lu 		} else {
14246f63e81bSBin Lu 			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
14256f63e81bSBin Lu 			*val = reg.vsxval[vsx_offset];
14266f63e81bSBin Lu 		}
14276f63e81bSBin Lu 		break;
14286f63e81bSBin Lu 
14296f63e81bSBin Lu 	case KVMPPC_VSX_COPY_WORD:
14306f63e81bSBin Lu 		vsx_offset =
14316f63e81bSBin Lu 			kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
14324eeb8556SSimon Guo 
14336f63e81bSBin Lu 		if (vsx_offset == -1) {
14346f63e81bSBin Lu 			result = -1;
14356f63e81bSBin Lu 			break;
14366f63e81bSBin Lu 		}
14376f63e81bSBin Lu 
14384eeb8556SSimon Guo 		if (rs < 32) {
14396f63e81bSBin Lu 			dword_offset = vsx_offset / 2;
14406f63e81bSBin Lu 			word_offset = vsx_offset % 2;
14416f63e81bSBin Lu 			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
14426f63e81bSBin Lu 			*val = reg.vsx32val[word_offset];
14436f63e81bSBin Lu 		} else {
14446f63e81bSBin Lu 			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
14456f63e81bSBin Lu 			*val = reg.vsx32val[vsx_offset];
14466f63e81bSBin Lu 		}
14476f63e81bSBin Lu 		break;
14486f63e81bSBin Lu 
14496f63e81bSBin Lu 	default:
14506f63e81bSBin Lu 		result = -1;
14518c99d345STianjia Zhang 		break;
14526f63e81bSBin Lu 	}
14536f63e81bSBin Lu 
14546f63e81bSBin Lu 	return result;
14556f63e81bSBin Lu }
14566f63e81bSBin Lu 
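/*
 * VSX stores: pull each element out of the source register with
 * kvmppc_get_vsr_data() and push it through kvmppc_handle_store(),
 * repeating mmio_vsx_copy_nums times (capped at 4).
 */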
kvmppc_handle_vsx_store(struct kvm_vcpu * vcpu,int rs,unsigned int bytes,int is_default_endian)14576f63e81bSBin Lu int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
14586f63e81bSBin Lu 			int rs, unsigned int bytes, int is_default_endian)
14599aa6825bSPaul Mackerras {
14609aa6825bSPaul Mackerras 	u64 val;
14616f63e81bSBin Lu 	enum emulation_result emulated = EMULATE_DONE;
14626f63e81bSBin Lu 
14636f63e81bSBin Lu 	vcpu->arch.io_gpr = rs;
14646f63e81bSBin Lu 
14656f63e81bSBin Lu 	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
14666f63e81bSBin Lu 	if (vcpu->arch.mmio_vsx_copy_nums > 4)
14678c99d345STianjia Zhang 		return EMULATE_FAIL;
14686f63e81bSBin Lu 
14696f63e81bSBin Lu 	while (vcpu->arch.mmio_vsx_copy_nums) {
14706f63e81bSBin Lu 		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
14716f63e81bSBin Lu 			return EMULATE_FAIL;
14726f63e81bSBin Lu 
14738c99d345STianjia Zhang 		emulated = kvmppc_handle_store(vcpu,
14746f63e81bSBin Lu 			 val, bytes, is_default_endian);
14756f63e81bSBin Lu 
14766f63e81bSBin Lu 		if (emulated != EMULATE_DONE)
14776f63e81bSBin Lu 			break;
14786f63e81bSBin Lu 
14796f63e81bSBin Lu 		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
14806f63e81bSBin Lu 
14816f63e81bSBin Lu 		vcpu->arch.mmio_vsx_copy_nums--;
14828c99d345STianjia Zhang 		vcpu->arch.mmio_vsx_offset++;
14836f63e81bSBin Lu 	}
14848c99d345STianjia Zhang 
14856f63e81bSBin Lu 	return emulated;
14866f63e81bSBin Lu }
14876f63e81bSBin Lu 
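/*
 * Re-enter MMIO emulation for the next element of a multi-element VSX
 * access once userspace has completed the previous one, and translate the
 * emulation result into a RESUME_HOST/RESUME_GUEST value for KVM_RUN.
 */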
kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu * vcpu)14886f63e81bSBin Lu static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
14896f63e81bSBin Lu {
14906f63e81bSBin Lu 	struct kvm_run *run = vcpu->run;
14918c99d345STianjia Zhang 	enum emulation_result emulated = EMULATE_FAIL;
14926f63e81bSBin Lu 	int r;
14936f63e81bSBin Lu 
14948c99d345STianjia Zhang 	vcpu->arch.paddr_accessed += run->mmio.len;
14956f63e81bSBin Lu 
14966f63e81bSBin Lu 	if (!vcpu->mmio_is_write) {
14976f63e81bSBin Lu 		emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
14986f63e81bSBin Lu 			 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
14996f63e81bSBin Lu 	} else {
15006f63e81bSBin Lu 		emulated = kvmppc_handle_vsx_store(vcpu,
15016f63e81bSBin Lu 			 vcpu->arch.io_gpr, run->mmio.len, 1);
15026f63e81bSBin Lu 	}
15036f63e81bSBin Lu 
15046f63e81bSBin Lu 	switch (emulated) {
15056f63e81bSBin Lu 	case EMULATE_DO_MMIO:
15066f63e81bSBin Lu 		run->exit_reason = KVM_EXIT_MMIO;
15076f63e81bSBin Lu 		r = RESUME_HOST;
15086f63e81bSBin Lu 		break;
15096f63e81bSBin Lu 	case EMULATE_FAIL:
15106f63e81bSBin Lu 		pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
15116f63e81bSBin Lu 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
15126f63e81bSBin Lu 		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
15136f63e81bSBin Lu 		r = RESUME_HOST;
15146f63e81bSBin Lu 		break;
15156f63e81bSBin Lu 	default:
15166f63e81bSBin Lu 		r = RESUME_GUEST;
151709f98496SJose Ricardo Ziviani 		break;
15188c99d345STianjia Zhang 	}
1519acc9eb93SSimon Guo 	return r;
152009f98496SJose Ricardo Ziviani }
15216df3877fSPaul Mackerras #endif /* CONFIG_VSX */
152209f98496SJose Ricardo Ziviani 
1523b99234b9SFabiano Rosas #ifdef CONFIG_ALTIVEC
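/*
 * Altivec (VMX) loads follow the same pattern as VSX: repeat the scalar
 * load path mmio_vmx_copy_nums times (capped at 2), stepping
 * paddr_accessed and mmio_vmx_offset between elements.
 */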
kvmppc_handle_vmx_load(struct kvm_vcpu * vcpu,unsigned int rt,unsigned int bytes,int is_default_endian)1524acc9eb93SSimon Guo int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
1525acc9eb93SSimon Guo 		unsigned int rt, unsigned int bytes, int is_default_endian)
152609f98496SJose Ricardo Ziviani {
15278c99d345STianjia Zhang 	enum emulation_result emulated = EMULATE_DONE;
152809f98496SJose Ricardo Ziviani 
152909f98496SJose Ricardo Ziviani 	if (vcpu->arch.mmio_vmx_copy_nums > 2)
153009f98496SJose Ricardo Ziviani 		return EMULATE_FAIL;
153109f98496SJose Ricardo Ziviani 
153209f98496SJose Ricardo Ziviani 	while (vcpu->arch.mmio_vmx_copy_nums) {
15338c99d345STianjia Zhang 		emulated = __kvmppc_handle_load(vcpu, rt, bytes,
153409f98496SJose Ricardo Ziviani 				is_default_endian, 0);
1535acc9eb93SSimon Guo 
153609f98496SJose Ricardo Ziviani 		if (emulated != EMULATE_DONE)
153709f98496SJose Ricardo Ziviani 			break;
153809f98496SJose Ricardo Ziviani 
153909f98496SJose Ricardo Ziviani 		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
154009f98496SJose Ricardo Ziviani 		vcpu->arch.mmio_vmx_copy_nums--;
15419236f57aSCédric Le Goater 		vcpu->arch.mmio_vmx_offset++;
154209f98496SJose Ricardo Ziviani 	}
1543acc9eb93SSimon Guo 
1544acc9eb93SSimon Guo 	return emulated;
1545acc9eb93SSimon Guo }
154609f98496SJose Ricardo Ziviani 
kvmppc_get_vmx_dword(struct kvm_vcpu * vcpu,int index,u64 * val)1547acc9eb93SSimon Guo static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
1548acc9eb93SSimon Guo {
1549acc9eb93SSimon Guo 	union kvmppc_one_reg reg;
1550acc9eb93SSimon Guo 	int vmx_offset = 0;
155109f98496SJose Ricardo Ziviani 	int result = 0;
155209f98496SJose Ricardo Ziviani 
1553acc9eb93SSimon Guo 	vmx_offset =
1554acc9eb93SSimon Guo 		kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
155509f98496SJose Ricardo Ziviani 
1556acc9eb93SSimon Guo 	if (vmx_offset == -1)
155709f98496SJose Ricardo Ziviani 		return -1;
155809f98496SJose Ricardo Ziviani 
15599236f57aSCédric Le Goater 	reg.vval = VCPU_VSX_VR(vcpu, index);
1560acc9eb93SSimon Guo 	*val = reg.vsxval[vmx_offset];
1561acc9eb93SSimon Guo 
1562acc9eb93SSimon Guo 	return result;
1563acc9eb93SSimon Guo }
1564acc9eb93SSimon Guo 
kvmppc_get_vmx_word(struct kvm_vcpu * vcpu,int index,u64 * val)1565acc9eb93SSimon Guo static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
1566acc9eb93SSimon Guo {
1567acc9eb93SSimon Guo 	union kvmppc_one_reg reg;
1568acc9eb93SSimon Guo 	int vmx_offset = 0;
1569acc9eb93SSimon Guo 	int result = 0;
1570acc9eb93SSimon Guo 
1571acc9eb93SSimon Guo 	vmx_offset =
1572acc9eb93SSimon Guo 		kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1573acc9eb93SSimon Guo 
1574acc9eb93SSimon Guo 	if (vmx_offset == -1)
1575acc9eb93SSimon Guo 		return -1;
1576acc9eb93SSimon Guo 
15779236f57aSCédric Le Goater 	reg.vval = VCPU_VSX_VR(vcpu, index);
1578acc9eb93SSimon Guo 	*val = reg.vsx32val[vmx_offset];
1579acc9eb93SSimon Guo 
1580acc9eb93SSimon Guo 	return result;
1581acc9eb93SSimon Guo }
1582acc9eb93SSimon Guo 
kvmppc_get_vmx_hword(struct kvm_vcpu * vcpu,int index,u64 * val)1583acc9eb93SSimon Guo static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
1584acc9eb93SSimon Guo {
1585acc9eb93SSimon Guo 	union kvmppc_one_reg reg;
1586acc9eb93SSimon Guo 	int vmx_offset = 0;
1587acc9eb93SSimon Guo 	int result = 0;
1588acc9eb93SSimon Guo 
1589acc9eb93SSimon Guo 	vmx_offset =
1590acc9eb93SSimon Guo 		kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1591acc9eb93SSimon Guo 
1592acc9eb93SSimon Guo 	if (vmx_offset == -1)
1593acc9eb93SSimon Guo 		return -1;
1594acc9eb93SSimon Guo 
15959236f57aSCédric Le Goater 	reg.vval = VCPU_VSX_VR(vcpu, index);
1596acc9eb93SSimon Guo 	*val = reg.vsx16val[vmx_offset];
1597acc9eb93SSimon Guo 
1598acc9eb93SSimon Guo 	return result;
1599acc9eb93SSimon Guo }
1600acc9eb93SSimon Guo 
kvmppc_get_vmx_byte(struct kvm_vcpu * vcpu,int index,u64 * val)1601acc9eb93SSimon Guo static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
1602acc9eb93SSimon Guo {
1603acc9eb93SSimon Guo 	union kvmppc_one_reg reg;
1604acc9eb93SSimon Guo 	int vmx_offset = 0;
1605acc9eb93SSimon Guo 	int result = 0;
1606acc9eb93SSimon Guo 
1607acc9eb93SSimon Guo 	vmx_offset =
1608acc9eb93SSimon Guo 		kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1609acc9eb93SSimon Guo 
1610acc9eb93SSimon Guo 	if (vmx_offset == -1)
1611acc9eb93SSimon Guo 		return -1;
1612acc9eb93SSimon Guo 
16138c99d345STianjia Zhang 	reg.vval = VCPU_VSX_VR(vcpu, index);
1614acc9eb93SSimon Guo 	*val = reg.vsx8val[vmx_offset];
161509f98496SJose Ricardo Ziviani 
161609f98496SJose Ricardo Ziviani 	return result;
1617acc9eb93SSimon Guo }
161809f98496SJose Ricardo Ziviani 
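/*
 * Altivec (VMX) stores: extract the doubleword, word, halfword or byte
 * selected by mmio_copy_type from the source VR and emit it with
 * kvmppc_handle_store(), repeating for each remaining element.
 */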
kvmppc_handle_vmx_store(struct kvm_vcpu * vcpu,unsigned int rs,unsigned int bytes,int is_default_endian)161909f98496SJose Ricardo Ziviani int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
1620b99234b9SFabiano Rosas 		unsigned int rs, unsigned int bytes, int is_default_endian)
1621acc9eb93SSimon Guo {
1622acc9eb93SSimon Guo 	u64 val = 0;
162309f98496SJose Ricardo Ziviani 	unsigned int index = rs & KVM_MMIO_REG_MASK;
162409f98496SJose Ricardo Ziviani 	enum emulation_result emulated = EMULATE_DONE;
162509f98496SJose Ricardo Ziviani 
1626acc9eb93SSimon Guo 	if (vcpu->arch.mmio_vmx_copy_nums > 2)
1627acc9eb93SSimon Guo 		return EMULATE_FAIL;
1628acc9eb93SSimon Guo 
162909f98496SJose Ricardo Ziviani 	vcpu->arch.io_gpr = rs;
163009f98496SJose Ricardo Ziviani 
1631acc9eb93SSimon Guo 	while (vcpu->arch.mmio_vmx_copy_nums) {
1632acc9eb93SSimon Guo 		switch (vcpu->arch.mmio_copy_type) {
1633acc9eb93SSimon Guo 		case KVMPPC_VMX_COPY_DWORD:
1634acc9eb93SSimon Guo 			if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
1635acc9eb93SSimon Guo 				return EMULATE_FAIL;
1636acc9eb93SSimon Guo 
1637acc9eb93SSimon Guo 			break;
1638acc9eb93SSimon Guo 		case KVMPPC_VMX_COPY_WORD:
1639acc9eb93SSimon Guo 			if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
1640acc9eb93SSimon Guo 				return EMULATE_FAIL;
1641acc9eb93SSimon Guo 			break;
1642acc9eb93SSimon Guo 		case KVMPPC_VMX_COPY_HWORD:
1643acc9eb93SSimon Guo 			if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
1644acc9eb93SSimon Guo 				return EMULATE_FAIL;
1645acc9eb93SSimon Guo 			break;
1646acc9eb93SSimon Guo 		case KVMPPC_VMX_COPY_BYTE:
1647acc9eb93SSimon Guo 			if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
16488c99d345STianjia Zhang 				return EMULATE_FAIL;
164909f98496SJose Ricardo Ziviani 			break;
165009f98496SJose Ricardo Ziviani 		default:
165109f98496SJose Ricardo Ziviani 			return EMULATE_FAIL;
165209f98496SJose Ricardo Ziviani 		}
16538c99d345STianjia Zhang 
165409f98496SJose Ricardo Ziviani 		emulated = kvmppc_handle_store(vcpu, val, bytes,
1655acc9eb93SSimon Guo 				is_default_endian);
165609f98496SJose Ricardo Ziviani 		if (emulated != EMULATE_DONE)
165709f98496SJose Ricardo Ziviani 			break;
165809f98496SJose Ricardo Ziviani 
165909f98496SJose Ricardo Ziviani 		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
166009f98496SJose Ricardo Ziviani 		vcpu->arch.mmio_vmx_copy_nums--;
16618c99d345STianjia Zhang 		vcpu->arch.mmio_vmx_offset++;
166209f98496SJose Ricardo Ziviani 	}
16638c99d345STianjia Zhang 
166409f98496SJose Ricardo Ziviani 	return emulated;
166509f98496SJose Ricardo Ziviani }
166609f98496SJose Ricardo Ziviani 
kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu * vcpu)166709f98496SJose Ricardo Ziviani static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
166809f98496SJose Ricardo Ziviani {
166909f98496SJose Ricardo Ziviani 	struct kvm_run *run = vcpu->run;
16708c99d345STianjia Zhang 	enum emulation_result emulated = EMULATE_FAIL;
1671acc9eb93SSimon Guo 	int r;
167209f98496SJose Ricardo Ziviani 
16738c99d345STianjia Zhang 	vcpu->arch.paddr_accessed += run->mmio.len;
1674acc9eb93SSimon Guo 
167509f98496SJose Ricardo Ziviani 	if (!vcpu->mmio_is_write) {
167609f98496SJose Ricardo Ziviani 		emulated = kvmppc_handle_vmx_load(vcpu,
167709f98496SJose Ricardo Ziviani 				vcpu->arch.io_gpr, run->mmio.len, 1);
167809f98496SJose Ricardo Ziviani 	} else {
167909f98496SJose Ricardo Ziviani 		emulated = kvmppc_handle_vmx_store(vcpu,
168009f98496SJose Ricardo Ziviani 				vcpu->arch.io_gpr, run->mmio.len, 1);
168109f98496SJose Ricardo Ziviani 	}
168209f98496SJose Ricardo Ziviani 
168309f98496SJose Ricardo Ziviani 	switch (emulated) {
168409f98496SJose Ricardo Ziviani 	case EMULATE_DO_MMIO:
168509f98496SJose Ricardo Ziviani 		run->exit_reason = KVM_EXIT_MMIO;
168609f98496SJose Ricardo Ziviani 		r = RESUME_HOST;
168709f98496SJose Ricardo Ziviani 		break;
168809f98496SJose Ricardo Ziviani 	case EMULATE_FAIL:
168909f98496SJose Ricardo Ziviani 		pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
169009f98496SJose Ricardo Ziviani 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
169109f98496SJose Ricardo Ziviani 		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
169209f98496SJose Ricardo Ziviani 		r = RESUME_HOST;
169309f98496SJose Ricardo Ziviani 		break;
169409f98496SJose Ricardo Ziviani 	default:
169509f98496SJose Ricardo Ziviani 		r = RESUME_GUEST;
16968a41ea53SMihai Caraman 		break;
16978a41ea53SMihai Caraman 	}
16988a41ea53SMihai Caraman 	return r;
16998a41ea53SMihai Caraman }
17008a41ea53SMihai Caraman #endif /* CONFIG_ALTIVEC */
17018a41ea53SMihai Caraman 
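/*
 * KVM_GET_ONE_REG backend.  kvmppc_get_one_reg() covers most register IDs;
 * the Altivec VR/VSCR/VRSAVE registers are handled here as a fallback, and
 * the value is then copied out to reg->addr in userspace.
 *
 * A minimal userspace sketch (hypothetical names, assuming an open vCPU
 * file descriptor in vcpu_fd; the register width is encoded in the ID):
 *
 *	__u64 val = 0;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_VRSAVE,
 *		.addr = (__u64)(unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */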
kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu * vcpu,struct kvm_one_reg * reg)17028a41ea53SMihai Caraman int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
17038a41ea53SMihai Caraman {
17048a41ea53SMihai Caraman 	int r = 0;
17058a41ea53SMihai Caraman 	union kvmppc_one_reg val;
17068a41ea53SMihai Caraman 	int size;
17078a41ea53SMihai Caraman 
17088a41ea53SMihai Caraman 	size = one_reg_size(reg->id);
17098a41ea53SMihai Caraman 	if (size > sizeof(val))
17103840edc8SMihai Caraman 		return -EINVAL;
17113840edc8SMihai Caraman 
17123840edc8SMihai Caraman 	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
17133840edc8SMihai Caraman 	if (r == -EINVAL) {
17143840edc8SMihai Caraman 		r = 0;
17153840edc8SMihai Caraman 		switch (reg->id) {
1716b4d7f161SGreg Kurz #ifdef CONFIG_ALTIVEC
17173840edc8SMihai Caraman 		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
17183840edc8SMihai Caraman 			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
17193840edc8SMihai Caraman 				r = -ENXIO;
17203840edc8SMihai Caraman 				break;
17213840edc8SMihai Caraman 			}
17223840edc8SMihai Caraman 			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
1723b4d7f161SGreg Kurz 			break;
17243840edc8SMihai Caraman 		case KVM_REG_PPC_VSCR:
17253840edc8SMihai Caraman 			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1726b4d7f161SGreg Kurz 				r = -ENXIO;
17273840edc8SMihai Caraman 				break;
17283840edc8SMihai Caraman 			}
17298a41ea53SMihai Caraman 			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
17308a41ea53SMihai Caraman 			break;
17318a41ea53SMihai Caraman 		case KVM_REG_PPC_VRSAVE:
17328a41ea53SMihai Caraman 			val = get_reg_val(reg->id, vcpu->arch.vrsave);
17338a41ea53SMihai Caraman 			break;
17348a41ea53SMihai Caraman #endif /* CONFIG_ALTIVEC */
17358a41ea53SMihai Caraman 		default:
17368a41ea53SMihai Caraman 			r = -EINVAL;
17378a41ea53SMihai Caraman 			break;
17388a41ea53SMihai Caraman 		}
17398a41ea53SMihai Caraman 	}
17408a41ea53SMihai Caraman 
17418a41ea53SMihai Caraman 	if (r)
17428a41ea53SMihai Caraman 		return r;
17438a41ea53SMihai Caraman 
17448a41ea53SMihai Caraman 	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
17458a41ea53SMihai Caraman 		r = -EFAULT;
17468a41ea53SMihai Caraman 
17478a41ea53SMihai Caraman 	return r;
17488a41ea53SMihai Caraman }
17498a41ea53SMihai Caraman 
kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu * vcpu,struct kvm_one_reg * reg)17508a41ea53SMihai Caraman int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
17518a41ea53SMihai Caraman {
17528a41ea53SMihai Caraman 	int r;
17538a41ea53SMihai Caraman 	union kvmppc_one_reg val;
17548a41ea53SMihai Caraman 	int size;
17558a41ea53SMihai Caraman 
17568a41ea53SMihai Caraman 	size = one_reg_size(reg->id);
17578a41ea53SMihai Caraman 	if (size > sizeof(val))
17588a41ea53SMihai Caraman 		return -EINVAL;
17598a41ea53SMihai Caraman 
17608a41ea53SMihai Caraman 	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
17613840edc8SMihai Caraman 		return -EFAULT;
17623840edc8SMihai Caraman 
17633840edc8SMihai Caraman 	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
17643840edc8SMihai Caraman 	if (r == -EINVAL) {
17653840edc8SMihai Caraman 		r = 0;
17663840edc8SMihai Caraman 		switch (reg->id) {
1767b4d7f161SGreg Kurz #ifdef CONFIG_ALTIVEC
17683840edc8SMihai Caraman 		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
17693840edc8SMihai Caraman 			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
17703840edc8SMihai Caraman 				r = -ENXIO;
17713840edc8SMihai Caraman 				break;
17723840edc8SMihai Caraman 			}
17733840edc8SMihai Caraman 			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
1774b4d7f161SGreg Kurz 			break;
17753840edc8SMihai Caraman 		case KVM_REG_PPC_VSCR:
17763840edc8SMihai Caraman 			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1777b4d7f161SGreg Kurz 				r = -ENXIO;
1778b4d7f161SGreg Kurz 				break;
1779b4d7f161SGreg Kurz 			}
1780b4d7f161SGreg Kurz 			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
1781b4d7f161SGreg Kurz 			break;
17823840edc8SMihai Caraman 		case KVM_REG_PPC_VRSAVE:
17833840edc8SMihai Caraman 			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
17848a41ea53SMihai Caraman 				r = -ENXIO;
17858a41ea53SMihai Caraman 				break;
17868a41ea53SMihai Caraman 			}
17878a41ea53SMihai Caraman 			vcpu->arch.vrsave = set_reg_val(reg->id, val);
17888a41ea53SMihai Caraman 			break;
17898a41ea53SMihai Caraman #endif /* CONFIG_ALTIVEC */
17908a41ea53SMihai Caraman 		default:
17918a41ea53SMihai Caraman 			r = -EINVAL;
17928a41ea53SMihai Caraman 			break;
17931b94f6f8STianjia Zhang 		}
1794bbf45ba5SHollis Blanchard 	}
17951b94f6f8STianjia Zhang 
1796bbf45ba5SHollis Blanchard 	return r;
1797bbf45ba5SHollis Blanchard }
1798accb757dSChristoffer Dall 
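/*
 * Top-level KVM_RUN handler.  Before entering the guest it finishes off
 * whatever the previous exit left pending: an MMIO load result, the next
 * element of a VSX/VMX multi-element access, OSI or PAPR hypercall return
 * values, or an EPR update.  It then calls kvmppc_vcpu_run() with the
 * guest signal mask activated, honouring run->immediate_exit.
 */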
kvm_arch_vcpu_ioctl_run(struct kvm_vcpu * vcpu)1799accb757dSChristoffer Dall int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1800bbf45ba5SHollis Blanchard {
18016f63e81bSBin Lu 	struct kvm_run *run = vcpu->run;
1802bbf45ba5SHollis Blanchard 	int r;
18038c99d345STianjia Zhang 
18046f63e81bSBin Lu 	vcpu_load(vcpu);
18056f63e81bSBin Lu 
18066f63e81bSBin Lu 	if (vcpu->mmio_needed) {
18076f63e81bSBin Lu 		vcpu->mmio_needed = 0;
18086f63e81bSBin Lu 		if (!vcpu->mmio_is_write)
18096f63e81bSBin Lu 			kvmppc_complete_mmio_load(vcpu);
18106f63e81bSBin Lu #ifdef CONFIG_VSX
18118c99d345STianjia Zhang 		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
18126f63e81bSBin Lu 			vcpu->arch.mmio_vsx_copy_nums--;
18136f63e81bSBin Lu 			vcpu->arch.mmio_vsx_offset++;
1814accb757dSChristoffer Dall 		}
18156f63e81bSBin Lu 
18166f63e81bSBin Lu 		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
18176f63e81bSBin Lu 			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
181809f98496SJose Ricardo Ziviani 			if (r == RESUME_HOST) {
1819acc9eb93SSimon Guo 				vcpu->mmio_needed = 1;
182009f98496SJose Ricardo Ziviani 				goto out;
1821acc9eb93SSimon Guo 			}
1822acc9eb93SSimon Guo 		}
182309f98496SJose Ricardo Ziviani #endif
182409f98496SJose Ricardo Ziviani #ifdef CONFIG_ALTIVEC
18258c99d345STianjia Zhang 		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
182609f98496SJose Ricardo Ziviani 			vcpu->arch.mmio_vmx_copy_nums--;
182709f98496SJose Ricardo Ziviani 			vcpu->arch.mmio_vmx_offset++;
18281ab03c07SRadim Krčmář 		}
182909f98496SJose Ricardo Ziviani 
183009f98496SJose Ricardo Ziviani 		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
183109f98496SJose Ricardo Ziviani 			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
1832ad0a048bSAlexander Graf 			if (r == RESUME_HOST) {
1833ad0a048bSAlexander Graf 				vcpu->mmio_needed = 1;
1834ad0a048bSAlexander Graf 				goto out;
1835ad0a048bSAlexander Graf 			}
1836ad0a048bSAlexander Graf 		}
1837ad0a048bSAlexander Graf #endif
1838ad0a048bSAlexander Graf 	} else if (vcpu->arch.osi_needed) {
1839de56a948SPaul Mackerras 		u64 *gprs = run->osi.gprs;
1840de56a948SPaul Mackerras 		int i;
1841de56a948SPaul Mackerras 
1842de56a948SPaul Mackerras 		for (i = 0; i < 32; i++)
1843de56a948SPaul Mackerras 			kvmppc_set_gpr(vcpu, i, gprs[i]);
1844de56a948SPaul Mackerras 		vcpu->arch.osi_needed = 0;
1845de56a948SPaul Mackerras 	} else if (vcpu->arch.hcall_needed) {
18461c810636SAlexander Graf 		int i;
18471c810636SAlexander Graf 
18481c810636SAlexander Graf 		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
18491c810636SAlexander Graf 		for (i = 0; i < 9; ++i)
18501c810636SAlexander Graf 			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
1851bbf45ba5SHollis Blanchard 		vcpu->arch.hcall_needed = 0;
1852bbf45ba5SHollis Blanchard #ifdef CONFIG_BOOKE
185320b7035cSJan H. Schönherr 	} else if (vcpu->arch.epr_needed) {
18546f63e81bSBin Lu 		kvmppc_set_epr(vcpu, run->epr.epr);
1855460df4c1SPaolo Bonzini 		vcpu->arch.epr_needed = 0;
1856460df4c1SPaolo Bonzini #endif
1857460df4c1SPaolo Bonzini 	}
18588c99d345STianjia Zhang 
1859bbf45ba5SHollis Blanchard 	kvm_sigset_activate(vcpu);
186020b7035cSJan H. Schönherr 
1861bbf45ba5SHollis Blanchard 	if (run->immediate_exit)
1862c662f773SPaul Mackerras 		r = -EINTR;
1863accb757dSChristoffer Dall 	else
1864c662f773SPaul Mackerras 		r = kvmppc_vcpu_run(vcpu);
186536d014d3SFabiano Rosas 
186636d014d3SFabiano Rosas 	kvm_sigset_deactivate(vcpu);
186736d014d3SFabiano Rosas 
186836d014d3SFabiano Rosas #ifdef CONFIG_ALTIVEC
186936d014d3SFabiano Rosas out:
187036d014d3SFabiano Rosas #endif
187136d014d3SFabiano Rosas 
187236d014d3SFabiano Rosas 	/*
1873accb757dSChristoffer Dall 	 * We're already returning to userspace, don't pass the
1874bbf45ba5SHollis Blanchard 	 * RESUME_HOST flags along.
1875bbf45ba5SHollis Blanchard 	 */
1876bbf45ba5SHollis Blanchard 	if (r > 0)
1877bbf45ba5SHollis Blanchard 		r = 0;
1878bbf45ba5SHollis Blanchard 
187919ccb76aSPaul Mackerras 	vcpu_put(vcpu);
18804fe27d2aSPaul Mackerras 	return r;
188119ccb76aSPaul Mackerras }
188219ccb76aSPaul Mackerras 
kvm_vcpu_ioctl_interrupt(struct kvm_vcpu * vcpu,struct kvm_interrupt * irq)188319ccb76aSPaul Mackerras int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
18849dd921cfSHollis Blanchard {
1885b6d33834SChristoffer Dall 	if (irq->irq == KVM_INTERRUPT_UNSET) {
1886dfd4d47eSScott Wood 		kvmppc_core_dequeue_external(vcpu);
188745c5eb67SHollis Blanchard 		return 0;
1888bbf45ba5SHollis Blanchard 	}
1889bbf45ba5SHollis Blanchard 
1890bbf45ba5SHollis Blanchard 	kvmppc_core_queue_external(vcpu, irq);
189171fbfd5fSAlexander Graf 
189271fbfd5fSAlexander Graf 	kvm_vcpu_kick(vcpu);
189371fbfd5fSAlexander Graf 
189471fbfd5fSAlexander Graf 	return 0;
189571fbfd5fSAlexander Graf }
189671fbfd5fSAlexander Graf 
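/*
 * Per-vcpu KVM_ENABLE_CAP: switch on optional features such as OSI or
 * PAPR hypercall handling, userspace EPR delivery, the booke watchdog,
 * the shadow TLB interface, and the MPIC/XICS/XIVE interrupt controller
 * connections, then re-run kvmppc_sanity_check() so an unusable
 * configuration is caught early.
 *
 * A minimal userspace sketch (hypothetical vcpu_fd, assuming a PAPR guest):
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_PAPR };
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */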
kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu * vcpu,struct kvm_enable_cap * cap)189771fbfd5fSAlexander Graf static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
189871fbfd5fSAlexander Graf 				     struct kvm_enable_cap *cap)
189971fbfd5fSAlexander Graf {
1900ad0a048bSAlexander Graf 	int r;
1901ad0a048bSAlexander Graf 
1902ad0a048bSAlexander Graf 	if (cap->flags)
1903ad0a048bSAlexander Graf 		return -EINVAL;
1904930b412aSAlexander Graf 
1905930b412aSAlexander Graf 	switch (cap->cap) {
1906930b412aSAlexander Graf 	case KVM_CAP_PPC_OSI:
1907930b412aSAlexander Graf 		r = 0;
19081c810636SAlexander Graf 		vcpu->arch.osi_enabled = true;
19091c810636SAlexander Graf 		break;
19105df554adSScott Wood 	case KVM_CAP_PPC_PAPR:
19115df554adSScott Wood 		r = 0;
19125df554adSScott Wood 		vcpu->arch.papr_enabled = true;
19135df554adSScott Wood 		break;
19141c810636SAlexander Graf 	case KVM_CAP_PPC_EPR:
1915f61c94bbSBharat Bhushan 		r = 0;
1916f61c94bbSBharat Bhushan 		if (cap->args[0])
1917f61c94bbSBharat Bhushan 			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
1918f61c94bbSBharat Bhushan 		else
1919f61c94bbSBharat Bhushan 			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
1920f61c94bbSBharat Bhushan 		break;
1921bf7ca4bdSAlexander Graf #ifdef CONFIG_BOOKE
1922dc83b8bcSScott Wood 	case KVM_CAP_PPC_BOOKE_WATCHDOG:
1923dc83b8bcSScott Wood 		r = 0;
1924dc83b8bcSScott Wood 		vcpu->arch.watchdog_enabled = true;
1925dc83b8bcSScott Wood 		break;
1926dc83b8bcSScott Wood #endif
1927dc83b8bcSScott Wood #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
1928dc83b8bcSScott Wood 	case KVM_CAP_SW_TLB: {
1929dc83b8bcSScott Wood 		struct kvm_config_tlb cfg;
1930dc83b8bcSScott Wood 		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
1931dc83b8bcSScott Wood 
1932dc83b8bcSScott Wood 		r = -EFAULT;
1933dc83b8bcSScott Wood 		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
1934eb1e4f43SScott Wood 			break;
1935eb1e4f43SScott Wood 
193670abadedSAl Viro 		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
1937eb1e4f43SScott Wood 		break;
1938eb1e4f43SScott Wood 	}
1939eb1e4f43SScott Wood #endif
194070abadedSAl Viro #ifdef CONFIG_KVM_MPIC
194170abadedSAl Viro 	case KVM_CAP_IRQ_MPIC: {
1942eb1e4f43SScott Wood 		struct fd f;
1943eb1e4f43SScott Wood 		struct kvm_device *dev;
1944eb1e4f43SScott Wood 
194570abadedSAl Viro 		r = -EBADF;
1946eb1e4f43SScott Wood 		f = fdget(cap->args[0]);
1947eb1e4f43SScott Wood 		if (!f.file)
1948eb1e4f43SScott Wood 			break;
194970abadedSAl Viro 
1950eb1e4f43SScott Wood 		r = -EPERM;
1951eb1e4f43SScott Wood 		dev = kvm_device_from_filp(f.file);
1952eb1e4f43SScott Wood 		if (dev)
19535975a2e0SPaul Mackerras 			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
19545975a2e0SPaul Mackerras 
195570abadedSAl Viro 		fdput(f);
19565975a2e0SPaul Mackerras 		break;
19575975a2e0SPaul Mackerras 	}
19585975a2e0SPaul Mackerras #endif
195970abadedSAl Viro #ifdef CONFIG_KVM_XICS
196070abadedSAl Viro 	case KVM_CAP_IRQ_XICS: {
19615975a2e0SPaul Mackerras 		struct fd f;
19625975a2e0SPaul Mackerras 		struct kvm_device *dev;
19635975a2e0SPaul Mackerras 
196470abadedSAl Viro 		r = -EBADF;
19655af50993SBenjamin Herrenschmidt 		f = fdget(cap->args[0]);
196603f95332SPaul Mackerras 		if (!f.file)
19675af50993SBenjamin Herrenschmidt 			break;
19685af50993SBenjamin Herrenschmidt 
19695975a2e0SPaul Mackerras 		r = -EPERM;
19705af50993SBenjamin Herrenschmidt 		dev = kvm_device_from_filp(f.file);
19715975a2e0SPaul Mackerras 		if (dev) {
197270abadedSAl Viro 			if (xics_on_xive())
19735975a2e0SPaul Mackerras 				r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
19745975a2e0SPaul Mackerras 			else
19755975a2e0SPaul Mackerras 				r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
1976eacc56bbSCédric Le Goater 		}
1977eacc56bbSCédric Le Goater 
1978eacc56bbSCédric Le Goater 		fdput(f);
1979eacc56bbSCédric Le Goater 		break;
1980eacc56bbSCédric Le Goater 	}
1981eacc56bbSCédric Le Goater #endif /* CONFIG_KVM_XICS */
1982eacc56bbSCédric Le Goater #ifdef CONFIG_KVM_XIVE
1983eacc56bbSCédric Le Goater 	case KVM_CAP_PPC_IRQ_XIVE: {
1984eacc56bbSCédric Le Goater 		struct fd f;
1985eacc56bbSCédric Le Goater 		struct kvm_device *dev;
1986eacc56bbSCédric Le Goater 
1987eacc56bbSCédric Le Goater 		r = -EBADF;
1988eacc56bbSCédric Le Goater 		f = fdget(cap->args[0]);
1989eacc56bbSCédric Le Goater 		if (!f.file)
1990eacc56bbSCédric Le Goater 			break;
1991eacc56bbSCédric Le Goater 
1992eacc56bbSCédric Le Goater 		r = -ENXIO;
1993eacc56bbSCédric Le Goater 		if (!xive_enabled())
1994eacc56bbSCédric Le Goater 			break;
1995eacc56bbSCédric Le Goater 
1996eacc56bbSCédric Le Goater 		r = -EPERM;
1997eacc56bbSCédric Le Goater 		dev = kvm_device_from_filp(f.file);
1998eacc56bbSCédric Le Goater 		if (dev)
1999eacc56bbSCédric Le Goater 			r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
2000134764edSAravinda Prasad 							    cap->args[1]);
2001134764edSAravinda Prasad 
2002134764edSAravinda Prasad 		fdput(f);
2003134764edSAravinda Prasad 		break;
2004134764edSAravinda Prasad 	}
2005134764edSAravinda Prasad #endif /* CONFIG_KVM_XIVE */
2006134764edSAravinda Prasad #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
2007134764edSAravinda Prasad 	case KVM_CAP_PPC_FWNMI:
2008134764edSAravinda Prasad 		r = -EINVAL;
200971fbfd5fSAlexander Graf 		if (!is_kvmppc_hv_enabled(vcpu->kvm))
201071fbfd5fSAlexander Graf 			break;
201171fbfd5fSAlexander Graf 		r = 0;
201271fbfd5fSAlexander Graf 		vcpu->kvm->arch.fwnmi_enabled = true;
201371fbfd5fSAlexander Graf 		break;
2014af8f38b3SAlexander Graf #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
2015af8f38b3SAlexander Graf 	default:
2016af8f38b3SAlexander Graf 		r = -EINVAL;
201771fbfd5fSAlexander Graf 		break;
201871fbfd5fSAlexander Graf 	}
201971fbfd5fSAlexander Graf 
202034a75b0fSPaul Mackerras 	if (!r)
202134a75b0fSPaul Mackerras 		r = kvmppc_sanity_check(vcpu);
202234a75b0fSPaul Mackerras 
202334a75b0fSPaul Mackerras 	return r;
202434a75b0fSPaul Mackerras }
202534a75b0fSPaul Mackerras 
kvm_arch_intc_initialized(struct kvm * kvm)202634a75b0fSPaul Mackerras bool kvm_arch_intc_initialized(struct kvm *kvm)
20275af50993SBenjamin Herrenschmidt {
202834a75b0fSPaul Mackerras #ifdef CONFIG_KVM_MPIC
202934a75b0fSPaul Mackerras 	if (kvm->arch.mpic)
203034a75b0fSPaul Mackerras 		return true;
203134a75b0fSPaul Mackerras #endif
203234a75b0fSPaul Mackerras #ifdef CONFIG_KVM_XICS
2033bbf45ba5SHollis Blanchard 	if (kvm->arch.xics || kvm->arch.xive)
2034bbf45ba5SHollis Blanchard 		return true;
2035bbf45ba5SHollis Blanchard #endif
2036bbf45ba5SHollis Blanchard 	return false;
2037bbf45ba5SHollis Blanchard }
2038bbf45ba5SHollis Blanchard 
kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu * vcpu,struct kvm_mp_state * mp_state)2039bbf45ba5SHollis Blanchard int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2040bbf45ba5SHollis Blanchard                                     struct kvm_mp_state *mp_state)
2041bbf45ba5SHollis Blanchard {
2042bbf45ba5SHollis Blanchard 	return -EINVAL;
2043bbf45ba5SHollis Blanchard }
2044bbf45ba5SHollis Blanchard 
kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu * vcpu,struct kvm_mp_state * mp_state)20455cb0944cSPaolo Bonzini int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2046bbf45ba5SHollis Blanchard                                     struct kvm_mp_state *mp_state)
2047bbf45ba5SHollis Blanchard {
2048bbf45ba5SHollis Blanchard 	return -EINVAL;
2049bbf45ba5SHollis Blanchard }
2050bbf45ba5SHollis Blanchard 
kvm_arch_vcpu_async_ioctl(struct file * filp,unsigned int ioctl,unsigned long arg)20519b062471SChristoffer Dall long kvm_arch_vcpu_async_ioctl(struct file *filp,
2052bbf45ba5SHollis Blanchard 			       unsigned int ioctl, unsigned long arg)
2053bbf45ba5SHollis Blanchard {
20549b062471SChristoffer Dall 	struct kvm_vcpu *vcpu = filp->private_data;
20559b062471SChristoffer Dall 	void __user *argp = (void __user *)arg;
2056bbf45ba5SHollis Blanchard 
20575cb0944cSPaolo Bonzini 	if (ioctl == KVM_INTERRUPT) {
20585cb0944cSPaolo Bonzini 		struct kvm_interrupt irq;
20595cb0944cSPaolo Bonzini 		if (copy_from_user(&irq, argp, sizeof(irq)))
20605cb0944cSPaolo Bonzini 			return -EFAULT;
20615cb0944cSPaolo Bonzini 		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
20625cb0944cSPaolo Bonzini 	}
20635cb0944cSPaolo Bonzini 	return -ENOIOCTLCMD;
20645cb0944cSPaolo Bonzini }
20655cb0944cSPaolo Bonzini 
kvm_arch_vcpu_ioctl(struct file * filp,unsigned int ioctl,unsigned long arg)206619483d14SAvi Kivity long kvm_arch_vcpu_ioctl(struct file *filp,
20679b062471SChristoffer Dall                          unsigned int ioctl, unsigned long arg)
206871fbfd5fSAlexander Graf {
206971fbfd5fSAlexander Graf 	struct kvm_vcpu *vcpu = filp->private_data;
207071fbfd5fSAlexander Graf 	void __user *argp = (void __user *)arg;
207171fbfd5fSAlexander Graf 	long r;
207271fbfd5fSAlexander Graf 
207371fbfd5fSAlexander Graf 	switch (ioctl) {
2074bc4188a2SNicholas Piggin 	case KVM_ENABLE_CAP:
207571fbfd5fSAlexander Graf 	{
2076b3cebfe8SSimon Guo 		struct kvm_enable_cap cap;
207771fbfd5fSAlexander Graf 		r = -EFAULT;
207871fbfd5fSAlexander Graf 		if (copy_from_user(&cap, argp, sizeof(cap)))
2079dc83b8bcSScott Wood 			goto out;
2080e24ed81fSAlexander Graf 		vcpu_load(vcpu);
2081e24ed81fSAlexander Graf 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2082e24ed81fSAlexander Graf 		vcpu_put(vcpu);
2083e24ed81fSAlexander Graf 		break;
2084e24ed81fSAlexander Graf 	}
2085e24ed81fSAlexander Graf 
2086e24ed81fSAlexander Graf 	case KVM_SET_ONE_REG:
2087e24ed81fSAlexander Graf 	case KVM_GET_ONE_REG:
2088e24ed81fSAlexander Graf 	{
2089e24ed81fSAlexander Graf 		struct kvm_one_reg reg;
2090e24ed81fSAlexander Graf 		r = -EFAULT;
2091e24ed81fSAlexander Graf 		if (copy_from_user(&reg, argp, sizeof(reg)))
2092e24ed81fSAlexander Graf 			goto out;
2093e24ed81fSAlexander Graf 		if (ioctl == KVM_SET_ONE_REG)
2094bf7ca4bdSAlexander Graf 			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
2095dc83b8bcSScott Wood 		else
2096dc83b8bcSScott Wood 			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
2097dc83b8bcSScott Wood 		break;
2098dc83b8bcSScott Wood 	}
2099dc83b8bcSScott Wood 
2100bc4188a2SNicholas Piggin #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
2101dc83b8bcSScott Wood 	case KVM_DIRTY_TLB: {
2102b3cebfe8SSimon Guo 		struct kvm_dirty_tlb dirty;
2103dc83b8bcSScott Wood 		r = -EFAULT;
2104dc83b8bcSScott Wood 		if (copy_from_user(&dirty, argp, sizeof(dirty)))
2105dc83b8bcSScott Wood 			goto out;
2106bbf45ba5SHollis Blanchard 		vcpu_load(vcpu);
2107bbf45ba5SHollis Blanchard 		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
2108bbf45ba5SHollis Blanchard 		vcpu_put(vcpu);
2109bbf45ba5SHollis Blanchard 		break;
2110bbf45ba5SHollis Blanchard 	}
2111bbf45ba5SHollis Blanchard #endif
2112bbf45ba5SHollis Blanchard 	default:
2113bbf45ba5SHollis Blanchard 		r = -EINVAL;
21141499fa80SSouptick Joarder 	}
21155b1c1493SCarsten Otte 
21165b1c1493SCarsten Otte out:
21175b1c1493SCarsten Otte 	return r;
21185b1c1493SCarsten Otte }
211915711e9cSAlexander Graf 
kvm_arch_vcpu_fault(struct kvm_vcpu * vcpu,struct vm_fault * vmf)212015711e9cSAlexander Graf vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2121784bafacSStuart Yoder {
2122784bafacSStuart Yoder 	return VM_FAULT_SIGBUS;
2123784bafacSStuart Yoder }
21242743103fSAlexander Graf 
kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo * pvinfo)21252743103fSAlexander Graf static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
21262743103fSAlexander Graf {
21272743103fSAlexander Graf 	u32 inst_nop = 0x60000000;
2128784bafacSStuart Yoder #ifdef CONFIG_KVM_BOOKE_HV
212915711e9cSAlexander Graf 	u32 inst_sc1 = 0x44000022;
213015711e9cSAlexander Graf 	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
213115711e9cSAlexander Graf 	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
213215711e9cSAlexander Graf 	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
213315711e9cSAlexander Graf 	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
213415711e9cSAlexander Graf #else
213515711e9cSAlexander Graf 	u32 inst_lis = 0x3c000000;
213615711e9cSAlexander Graf 	u32 inst_ori = 0x60000000;
213715711e9cSAlexander Graf 	u32 inst_sc = 0x44000002;
213815711e9cSAlexander Graf 	u32 inst_imm_mask = 0xffff;
213915711e9cSAlexander Graf 
214015711e9cSAlexander Graf 	/*
214115711e9cSAlexander Graf 	 * The hypercall to get into KVM from within guest context is as
214215711e9cSAlexander Graf 	 * follows:
21432743103fSAlexander Graf 	 *
21442743103fSAlexander Graf 	 *    lis r0, KVM_SC_MAGIC_R0@h
21452743103fSAlexander Graf 	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
21462743103fSAlexander Graf 	 *    sc
2147784bafacSStuart Yoder 	 *    nop
214815711e9cSAlexander Graf 	 */
21499202e076SLiu Yu-B13201 	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
21509202e076SLiu Yu-B13201 	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
215115711e9cSAlexander Graf 	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
215215711e9cSAlexander Graf 	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
215315711e9cSAlexander Graf #endif
2154d663b8a2SPaolo Bonzini 
2155d663b8a2SPaolo Bonzini 	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;
2156d663b8a2SPaolo Bonzini 
2157d663b8a2SPaolo Bonzini 	return 0;
2158d663b8a2SPaolo Bonzini }
2159d663b8a2SPaolo Bonzini 
kvm_arch_irqchip_in_kernel(struct kvm * kvm)2160d663b8a2SPaolo Bonzini bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
2161d663b8a2SPaolo Bonzini {
2162d663b8a2SPaolo Bonzini 	int ret = 0;
2163d663b8a2SPaolo Bonzini 
2164d663b8a2SPaolo Bonzini #ifdef CONFIG_KVM_MPIC
2165d663b8a2SPaolo Bonzini 	ret = ret || (kvm->arch.mpic != NULL);
2166d663b8a2SPaolo Bonzini #endif
2167d663b8a2SPaolo Bonzini #ifdef CONFIG_KVM_XICS
2168d663b8a2SPaolo Bonzini 	ret = ret || (kvm->arch.xics != NULL);
21695efdb4beSAlexander Graf 	ret = ret || (kvm->arch.xive != NULL);
21705efdb4beSAlexander Graf #endif
21715efdb4beSAlexander Graf 	smp_rmb();
2172d663b8a2SPaolo Bonzini 	return ret;
21735efdb4beSAlexander Graf }
21745efdb4beSAlexander Graf 
kvm_vm_ioctl_irq_line(struct kvm * kvm,struct kvm_irq_level * irq_event,bool line_status)21755efdb4beSAlexander Graf int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
21765efdb4beSAlexander Graf 			  bool line_status)
21775efdb4beSAlexander Graf {
21785efdb4beSAlexander Graf 	if (!kvm_arch_irqchip_in_kernel(kvm))
21795efdb4beSAlexander Graf 		return -ENXIO;
21805efdb4beSAlexander Graf 
2181699a0ea0SPaul Mackerras 	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
2182e5d83c74SPaolo Bonzini 					irq_event->irq, irq_event->level,
2183699a0ea0SPaul Mackerras 					line_status);
2184699a0ea0SPaul Mackerras 	return 0;
2185699a0ea0SPaul Mackerras }
2186699a0ea0SPaul Mackerras 
2187699a0ea0SPaul Mackerras 
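/*
 * VM-wide KVM_ENABLE_CAP: Book3S-64 capabilities such as per-hcall
 * enablement, SMT mode, nested HV, secure guests and DAWR1 are routed to
 * the active kvm_ops backend where one is provided.
 */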
kvm_vm_ioctl_enable_cap(struct kvm * kvm,struct kvm_enable_cap * cap)2188699a0ea0SPaul Mackerras int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
2189699a0ea0SPaul Mackerras 			    struct kvm_enable_cap *cap)
2190699a0ea0SPaul Mackerras {
2191699a0ea0SPaul Mackerras 	int r;
2192699a0ea0SPaul Mackerras 
2193699a0ea0SPaul Mackerras 	if (cap->flags)
2194699a0ea0SPaul Mackerras 		return -EINVAL;
2195699a0ea0SPaul Mackerras 
2196699a0ea0SPaul Mackerras 	switch (cap->cap) {
2197699a0ea0SPaul Mackerras #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
2198699a0ea0SPaul Mackerras 	case KVM_CAP_PPC_ENABLE_HCALL: {
2199ae2113a4SPaul Mackerras 		unsigned long hcall = cap->args[0];
2200ae2113a4SPaul Mackerras 
2201699a0ea0SPaul Mackerras 		r = -EINVAL;
2202699a0ea0SPaul Mackerras 		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
2203699a0ea0SPaul Mackerras 		    cap->args[1] > 1)
2204699a0ea0SPaul Mackerras 			break;
2205699a0ea0SPaul Mackerras 		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
2206699a0ea0SPaul Mackerras 			break;
2207699a0ea0SPaul Mackerras 		if (cap->args[1])
22083c313524SPaul Mackerras 			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
22093c313524SPaul Mackerras 		else
22103c313524SPaul Mackerras 			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
22113c313524SPaul Mackerras 		r = 0;
22123c313524SPaul Mackerras 		break;
22133c313524SPaul Mackerras 	}
22143c313524SPaul Mackerras 	case KVM_CAP_PPC_SMT: {
22153c313524SPaul Mackerras 		unsigned long mode = cap->args[0];
22163c313524SPaul Mackerras 		unsigned long flags = cap->args[1];
2217aa069a99SPaul Mackerras 
2218aa069a99SPaul Mackerras 		r = -EINVAL;
2219aa069a99SPaul Mackerras 		if (kvm->arch.kvm_ops->set_smt_mode)
2220aa069a99SPaul Mackerras 			r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
2221aa069a99SPaul Mackerras 		break;
2222aa069a99SPaul Mackerras 	}
2223aa069a99SPaul Mackerras 
2224aa069a99SPaul Mackerras 	case KVM_CAP_PPC_NESTED_HV:
2225699a0ea0SPaul Mackerras 		r = -EINVAL;
22269a5788c6SPaul Mackerras 		if (!is_kvmppc_hv_enabled(kvm) ||
22279a5788c6SPaul Mackerras 		    !kvm->arch.kvm_ops->enable_nested)
22289a5788c6SPaul Mackerras 			break;
22299a5788c6SPaul Mackerras 		r = kvm->arch.kvm_ops->enable_nested(kvm);
22309a5788c6SPaul Mackerras 		break;
22319a5788c6SPaul Mackerras #endif
22329a5788c6SPaul Mackerras #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
2233d9a47edaSRavi Bangoria 	case KVM_CAP_PPC_SECURE_GUEST:
2234d9a47edaSRavi Bangoria 		r = -EINVAL;
2235d9a47edaSRavi Bangoria 		if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
2236d9a47edaSRavi Bangoria 			break;
2237d9a47edaSRavi Bangoria 		r = kvm->arch.kvm_ops->enable_svm(kvm);
2238d9a47edaSRavi Bangoria 		break;
22399a5788c6SPaul Mackerras 	case KVM_CAP_PPC_DAWR1:
2240699a0ea0SPaul Mackerras 		r = -EINVAL;
2241699a0ea0SPaul Mackerras 		if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1)
2242699a0ea0SPaul Mackerras 			break;
2243699a0ea0SPaul Mackerras 		r = kvm->arch.kvm_ops->enable_dawr1(kvm);
2244699a0ea0SPaul Mackerras 		break;
2245699a0ea0SPaul Mackerras #endif
2246699a0ea0SPaul Mackerras 	default:
2247699a0ea0SPaul Mackerras 		r = -EINVAL;
22483214d01fSPaul Mackerras 		break;
22493214d01fSPaul Mackerras 	}
22503214d01fSPaul Mackerras 
22513214d01fSPaul Mackerras 	return r;
22523214d01fSPaul Mackerras }
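
/*
 * Illustrative KVM_ENABLE_CAP call exercising the KVM_CAP_PPC_ENABLE_HCALL
 * case above (a sketch; "vm_fd" and the hcall token are placeholder values,
 * and the token must be a multiple of 4 no larger than MAX_HCALL_OPCODE):
 *
 *	struct kvm_enable_cap cap = {
 *		.cap  = KVM_CAP_PPC_ENABLE_HCALL,
 *		.args = { 0x3a8, 1 },	// args[0] = hcall number, args[1] = enable
 *	};
 *
 *	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		perror("KVM_ENABLE_CAP");
 */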
22533214d01fSPaul Mackerras 
22543214d01fSPaul Mackerras #ifdef CONFIG_PPC_BOOK3S_64
22553214d01fSPaul Mackerras /*
22563214d01fSPaul Mackerras  * These functions check whether the underlying hardware is safe
22573214d01fSPaul Mackerras  * against attacks based on observing the effects of speculatively
22583214d01fSPaul Mackerras  * executed instructions, and whether it supplies instructions for
22593214d01fSPaul Mackerras  * use in workarounds.  The information comes from firmware, either
22603214d01fSPaul Mackerras  * via the device tree on powernv platforms or from an hcall on
22613214d01fSPaul Mackerras  * pseries platforms.
22623214d01fSPaul Mackerras  */
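
/*
 * The collected data reaches userspace through the KVM_PPC_GET_CPU_CHAR vm
 * ioctl handled later in this file; a minimal caller sketch ("vm_fd" is a
 * placeholder):
 *
 *	struct kvm_ppc_cpu_char cc;
 *
 *	if (ioctl(vm_fd, KVM_PPC_GET_CPU_CHAR, &cc) == 0) {
 *		// cc.character / cc.behaviour report the mitigations;
 *		// cc.character_mask / cc.behaviour_mask indicate which
 *		// bits of those words are actually meaningful.
 *	}
 */
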
22633214d01fSPaul Mackerras #ifdef CONFIG_PPC_PSERIES
22643214d01fSPaul Mackerras static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
22653214d01fSPaul Mackerras {
22663214d01fSPaul Mackerras 	struct h_cpu_char_result c;
22673214d01fSPaul Mackerras 	unsigned long rc;
22683214d01fSPaul Mackerras 
22693214d01fSPaul Mackerras 	if (!machine_is(pseries))
22703214d01fSPaul Mackerras 		return -ENOTTY;
22713214d01fSPaul Mackerras 
22723214d01fSPaul Mackerras 	rc = plpar_get_cpu_characteristics(&c);
22733214d01fSPaul Mackerras 	if (rc == H_SUCCESS) {
22743214d01fSPaul Mackerras 		cp->character = c.character;
22753214d01fSPaul Mackerras 		cp->behaviour = c.behaviour;
22763214d01fSPaul Mackerras 		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
22772b57ecd0SSuraj Jitindar Singh 			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
22782b57ecd0SSuraj Jitindar Singh 			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
22793214d01fSPaul Mackerras 			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
22803214d01fSPaul Mackerras 			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
22812b57ecd0SSuraj Jitindar Singh 			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
22822b57ecd0SSuraj Jitindar Singh 			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
22833214d01fSPaul Mackerras 			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
22843214d01fSPaul Mackerras 			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
22853214d01fSPaul Mackerras 		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
22863214d01fSPaul Mackerras 			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
22873214d01fSPaul Mackerras 			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
22883214d01fSPaul Mackerras 			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
22893214d01fSPaul Mackerras 	}
22903214d01fSPaul Mackerras 	return 0;
22913214d01fSPaul Mackerras }
22923214d01fSPaul Mackerras #else
22933214d01fSPaul Mackerras static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
22943214d01fSPaul Mackerras {
22953214d01fSPaul Mackerras 	return -ENOTTY;
22963214d01fSPaul Mackerras }
22973214d01fSPaul Mackerras #endif
22983214d01fSPaul Mackerras 
22993214d01fSPaul Mackerras static inline bool have_fw_feat(struct device_node *fw_features,
23003214d01fSPaul Mackerras 				const char *state, const char *name)
23013214d01fSPaul Mackerras {
23023214d01fSPaul Mackerras 	struct device_node *np;
23033214d01fSPaul Mackerras 	bool r = false;
23043214d01fSPaul Mackerras 
23053214d01fSPaul Mackerras 	np = of_get_child_by_name(fw_features, name);
23063214d01fSPaul Mackerras 	if (np) {
23073214d01fSPaul Mackerras 		r = of_property_read_bool(np, state);
23083214d01fSPaul Mackerras 		of_node_put(np);
23093214d01fSPaul Mackerras 	}
23103214d01fSPaul Mackerras 	return r;
23113214d01fSPaul Mackerras }
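
/*
 * Sketch of the device-tree layout this helper walks on powernv; the node
 * and property names mirror the lookups in kvmppc_get_cpu_char() below, but
 * the exact set of children is firmware-dependent:
 *
 *	ibm,opal {
 *		fw-features {
 *			inst-spec-barrier-ori31,31,0 {
 *				enabled;
 *			};
 *			needs-l1d-flush-msr-pr-0-to-1 {
 *				disabled;
 *			};
 *		};
 *	};
 *
 * have_fw_feat() simply reports whether the named child of "fw-features"
 * carries the requested boolean property ("enabled" or "disabled").
 */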
23123214d01fSPaul Mackerras 
23133214d01fSPaul Mackerras static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
23143214d01fSPaul Mackerras {
23153214d01fSPaul Mackerras 	struct device_node *np, *fw_features;
23163214d01fSPaul Mackerras 	int r;
23173214d01fSPaul Mackerras 
23183214d01fSPaul Mackerras 	memset(cp, 0, sizeof(*cp));
23193214d01fSPaul Mackerras 	r = pseries_get_cpu_char(cp);
23203214d01fSPaul Mackerras 	if (r != -ENOTTY)
23213214d01fSPaul Mackerras 		return r;
23223214d01fSPaul Mackerras 
23233214d01fSPaul Mackerras 	np = of_find_node_by_name(NULL, "ibm,opal");
23243214d01fSPaul Mackerras 	if (np) {
23253214d01fSPaul Mackerras 		fw_features = of_get_child_by_name(np, "fw-features");
23263214d01fSPaul Mackerras 		of_node_put(np);
23273214d01fSPaul Mackerras 		if (!fw_features)
23283214d01fSPaul Mackerras 			return 0;
23293214d01fSPaul Mackerras 		if (have_fw_feat(fw_features, "enabled",
23303214d01fSPaul Mackerras 				 "inst-spec-barrier-ori31,31,0"))
23313214d01fSPaul Mackerras 			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
23323214d01fSPaul Mackerras 		if (have_fw_feat(fw_features, "enabled",
23333214d01fSPaul Mackerras 				 "fw-bcctrl-serialized"))
23343214d01fSPaul Mackerras 			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
23353214d01fSPaul Mackerras 		if (have_fw_feat(fw_features, "enabled",
23363214d01fSPaul Mackerras 				 "inst-l1d-flush-ori30,30,0"))
23373214d01fSPaul Mackerras 			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
23383214d01fSPaul Mackerras 		if (have_fw_feat(fw_features, "enabled",
23393214d01fSPaul Mackerras 				 "inst-l1d-flush-trig2"))
23403214d01fSPaul Mackerras 			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
23412b57ecd0SSuraj Jitindar Singh 		if (have_fw_feat(fw_features, "enabled",
23422b57ecd0SSuraj Jitindar Singh 				 "fw-l1d-thread-split"))
23432b57ecd0SSuraj Jitindar Singh 			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
23443214d01fSPaul Mackerras 		if (have_fw_feat(fw_features, "enabled",
23453214d01fSPaul Mackerras 				 "fw-count-cache-disabled"))
23463214d01fSPaul Mackerras 			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
23473214d01fSPaul Mackerras 		if (have_fw_feat(fw_features, "enabled",
23483214d01fSPaul Mackerras 				 "fw-count-cache-flush-bcctr2,0,0"))
23492b57ecd0SSuraj Jitindar Singh 			cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
23502b57ecd0SSuraj Jitindar Singh 		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
23513214d01fSPaul Mackerras 			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
23523214d01fSPaul Mackerras 			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
23533214d01fSPaul Mackerras 			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
23543214d01fSPaul Mackerras 			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
23553214d01fSPaul Mackerras 			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
23563214d01fSPaul Mackerras 			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
23573214d01fSPaul Mackerras 
23583214d01fSPaul Mackerras 		if (have_fw_feat(fw_features, "enabled",
23593214d01fSPaul Mackerras 				 "speculation-policy-favor-security"))
23603214d01fSPaul Mackerras 			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
23612b57ecd0SSuraj Jitindar Singh 		if (!have_fw_feat(fw_features, "disabled",
23622b57ecd0SSuraj Jitindar Singh 				  "needs-l1d-flush-msr-pr-0-to-1"))
23632b57ecd0SSuraj Jitindar Singh 			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
23643214d01fSPaul Mackerras 		if (!have_fw_feat(fw_features, "disabled",
23653214d01fSPaul Mackerras 				  "needs-spec-barrier-for-bound-checks"))
23662b57ecd0SSuraj Jitindar Singh 			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
23672b57ecd0SSuraj Jitindar Singh 		if (have_fw_feat(fw_features, "enabled",
23683214d01fSPaul Mackerras 				 "needs-count-cache-flush-on-context-switch"))
23693214d01fSPaul Mackerras 			cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
23703214d01fSPaul Mackerras 		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
23713214d01fSPaul Mackerras 			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
23723214d01fSPaul Mackerras 			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
23733214d01fSPaul Mackerras 			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
23743214d01fSPaul Mackerras 
23753214d01fSPaul Mackerras 		of_node_put(fw_features);
2376bbf45ba5SHollis Blanchard 	}
2377bbf45ba5SHollis Blanchard 
2378bbf45ba5SHollis Blanchard 	return 0;
23795df554adSScott Wood }
238015711e9cSAlexander Graf #endif
2381bbf45ba5SHollis Blanchard 
2382bbf45ba5SHollis Blanchard int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
2383bbf45ba5SHollis Blanchard {
238415711e9cSAlexander Graf 	struct kvm *kvm __maybe_unused = filp->private_data;
238515711e9cSAlexander Graf 	void __user *argp = (void __user *)arg;
2386d8cdddcdSVasiliy Kulikov 	int r;
238715711e9cSAlexander Graf 
238815711e9cSAlexander Graf 	switch (ioctl) {
238915711e9cSAlexander Graf 	case KVM_PPC_GET_PVINFO: {
239015711e9cSAlexander Graf 		struct kvm_ppc_pvinfo pvinfo;
239115711e9cSAlexander Graf 		memset(&pvinfo, 0, sizeof(pvinfo));
239215711e9cSAlexander Graf 		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
239315711e9cSAlexander Graf 		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
239415711e9cSAlexander Graf 			r = -EFAULT;
239576d837a4SPaul Mackerras 			goto out;
239658ded420SAlexey Kardashevskiy 		}
239758ded420SAlexey Kardashevskiy 
239858ded420SAlexey Kardashevskiy 		break;
239958ded420SAlexey Kardashevskiy 	}
240058ded420SAlexey Kardashevskiy #ifdef CONFIG_SPAPR_TCE_IOMMU
240158ded420SAlexey Kardashevskiy 	case KVM_CREATE_SPAPR_TCE_64: {
240258ded420SAlexey Kardashevskiy 		struct kvm_create_spapr_tce_64 create_tce_64;
240358ded420SAlexey Kardashevskiy 
240458ded420SAlexey Kardashevskiy 		r = -EFAULT;
240558ded420SAlexey Kardashevskiy 		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
240658ded420SAlexey Kardashevskiy 			goto out;
240758ded420SAlexey Kardashevskiy 		if (create_tce_64.flags) {
240858ded420SAlexey Kardashevskiy 			r = -EINVAL;
240954738c09SDavid Gibson 			goto out;
241054738c09SDavid Gibson 		}
241158ded420SAlexey Kardashevskiy 		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
241254738c09SDavid Gibson 		goto out;
241354738c09SDavid Gibson 	}
241454738c09SDavid Gibson 	case KVM_CREATE_SPAPR_TCE: {
241554738c09SDavid Gibson 		struct kvm_create_spapr_tce create_tce;
241658ded420SAlexey Kardashevskiy 		struct kvm_create_spapr_tce_64 create_tce_64;
241758ded420SAlexey Kardashevskiy 
241858ded420SAlexey Kardashevskiy 		r = -EFAULT;
241958ded420SAlexey Kardashevskiy 		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
242058ded420SAlexey Kardashevskiy 			goto out;
242158ded420SAlexey Kardashevskiy 
242258ded420SAlexey Kardashevskiy 		create_tce_64.liobn = create_tce.liobn;
242358ded420SAlexey Kardashevskiy 		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
242454738c09SDavid Gibson 		create_tce_64.offset = 0;
242554738c09SDavid Gibson 		create_tce_64.size = create_tce.window_size >>
242676d837a4SPaul Mackerras 				IOMMU_PAGE_SHIFT_4K;
242776d837a4SPaul Mackerras 		create_tce_64.flags = 0;
24285b74716eSBenjamin Herrenschmidt 		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
24295b74716eSBenjamin Herrenschmidt 		goto out;
2430cbbc58d4SAneesh Kumar K.V 	}
24315b74716eSBenjamin Herrenschmidt #endif
24325b74716eSBenjamin Herrenschmidt #ifdef CONFIG_PPC_BOOK3S_64
2433cbbc58d4SAneesh Kumar K.V 	case KVM_PPC_GET_SMMU_INFO: {
24345b74716eSBenjamin Herrenschmidt 		struct kvm_ppc_smmu_info info;
24355b74716eSBenjamin Herrenschmidt 		struct kvm *kvm = filp->private_data;
24365b74716eSBenjamin Herrenschmidt 
24375b74716eSBenjamin Herrenschmidt 		memset(&info, 0, sizeof(info));
24388e591cb7SMichael Ellerman 		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
24398e591cb7SMichael Ellerman 		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
24408e591cb7SMichael Ellerman 			r = -EFAULT;
24418e591cb7SMichael Ellerman 		break;
24428e591cb7SMichael Ellerman 	}
24438e591cb7SMichael Ellerman 	case KVM_PPC_RTAS_DEFINE_TOKEN: {
2444c9270132SPaul Mackerras 		struct kvm *kvm = filp->private_data;
2445c9270132SPaul Mackerras 
2446c9270132SPaul Mackerras 		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
2447c9270132SPaul Mackerras 		break;
2448c9270132SPaul Mackerras 	}
2449c9270132SPaul Mackerras 	case KVM_PPC_CONFIGURE_V3_MMU: {
2450c9270132SPaul Mackerras 		struct kvm *kvm = filp->private_data;
2451c9270132SPaul Mackerras 		struct kvm_ppc_mmuv3_cfg cfg;
2452c9270132SPaul Mackerras 
2453c9270132SPaul Mackerras 		r = -EINVAL;
2454c9270132SPaul Mackerras 		if (!kvm->arch.kvm_ops->configure_mmu)
2455c9270132SPaul Mackerras 			goto out;
2456c9270132SPaul Mackerras 		r = -EFAULT;
2457c9270132SPaul Mackerras 		if (copy_from_user(&cfg, argp, sizeof(cfg)))
2458c9270132SPaul Mackerras 			goto out;
2459c9270132SPaul Mackerras 		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
2460c9270132SPaul Mackerras 		break;
2461c9270132SPaul Mackerras 	}
2462c9270132SPaul Mackerras 	case KVM_PPC_GET_RMMU_INFO: {
2463c9270132SPaul Mackerras 		struct kvm *kvm = filp->private_data;
2464c9270132SPaul Mackerras 		struct kvm_ppc_rmmu_info info;
2465c9270132SPaul Mackerras 
2466c9270132SPaul Mackerras 		r = -EINVAL;
2467c9270132SPaul Mackerras 		if (!kvm->arch.kvm_ops->get_rmmu_info)
2468c9270132SPaul Mackerras 			goto out;
24693214d01fSPaul Mackerras 		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
24703214d01fSPaul Mackerras 		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
24713214d01fSPaul Mackerras 			r = -EFAULT;
24723214d01fSPaul Mackerras 		break;
24733214d01fSPaul Mackerras 	}
24743214d01fSPaul Mackerras 	case KVM_PPC_GET_CPU_CHAR: {
24753214d01fSPaul Mackerras 		struct kvm_ppc_cpu_char cpuchar;
24763214d01fSPaul Mackerras 
247722945688SBharata B Rao 		r = kvmppc_get_cpu_char(&cpuchar);
247822945688SBharata B Rao 		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
247922945688SBharata B Rao 			r = -EFAULT;
248022945688SBharata B Rao 		break;
248122945688SBharata B Rao 	}
248222945688SBharata B Rao 	case KVM_PPC_SVM_OFF: {
248322945688SBharata B Rao 		struct kvm *kvm = filp->private_data;
248422945688SBharata B Rao 
248522945688SBharata B Rao 		r = 0;
248622945688SBharata B Rao 		if (!kvm->arch.kvm_ops->svm_off)
2487cbbc58d4SAneesh Kumar K.V 			goto out;
2488cbbc58d4SAneesh Kumar K.V 
2489cbbc58d4SAneesh Kumar K.V 		r = kvm->arch.kvm_ops->svm_off(kvm);
2490cbbc58d4SAneesh Kumar K.V 		break;
24913a167beaSAneesh Kumar K.V 	}
2492bbf45ba5SHollis Blanchard 	default: {
2493367e1319SAvi Kivity 		struct kvm *kvm = filp->private_data;
24943a167beaSAneesh Kumar K.V 		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
2495bbf45ba5SHollis Blanchard 	}
249615711e9cSAlexander Graf #else /* CONFIG_PPC_BOOK3S_64 */
2497bbf45ba5SHollis Blanchard 	default:
2498bbf45ba5SHollis Blanchard 		r = -ENOTTY;
2499bbf45ba5SHollis Blanchard #endif
25006ba2a292SNicholas Piggin 	}
2501043cc4d7SScott Wood out:
2502043cc4d7SScott Wood 	return r;
2503043cc4d7SScott Wood }
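
/*
 * Ioctls not recognised above fall through to the HV- or PR-specific
 * arch_vm_ioctl() hook on Book3S 64 and return -ENOTTY elsewhere. A typical
 * caller of one of the Book3S queries ("vm_fd" is a placeholder):
 *
 *	struct kvm_ppc_smmu_info smmu;
 *
 *	if (ioctl(vm_fd, KVM_PPC_GET_SMMU_INFO, &smmu) == 0) {
 *		// smmu describes the segment and page sizes the host MMU
 *		// can offer to a guest.
 *	}
 */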
2504043cc4d7SScott Wood 
25056ba2a292SNicholas Piggin static DEFINE_IDA(lpid_inuse);
2506043cc4d7SScott Wood static unsigned long nr_lpids;
25076ba2a292SNicholas Piggin 
25086ba2a292SNicholas Piggin long kvmppc_alloc_lpid(void)
25096ba2a292SNicholas Piggin {
25106ba2a292SNicholas Piggin 	int lpid;
25116ba2a292SNicholas Piggin 
25126ba2a292SNicholas Piggin 	/* The host LPID must always be 0 (allocation starts at 1) */
2513043cc4d7SScott Wood 	lpid = ida_alloc_range(&lpid_inuse, 1, nr_lpids - 1, GFP_KERNEL);
2514043cc4d7SScott Wood 	if (lpid < 0) {
2515043cc4d7SScott Wood 		if (lpid == -ENOMEM)
2516043cc4d7SScott Wood 			pr_err("%s: Out of memory\n", __func__);
2517043cc4d7SScott Wood 		else
2518043cc4d7SScott Wood 			pr_err("%s: No LPIDs free\n", __func__);
25192ba9f0d8SAneesh Kumar K.V 		return -ENOMEM;
2520043cc4d7SScott Wood 	}
2521043cc4d7SScott Wood 
2522043cc4d7SScott Wood 	return lpid;
25236ba2a292SNicholas Piggin }
2524043cc4d7SScott Wood EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
25252ba9f0d8SAneesh Kumar K.V 
2526043cc4d7SScott Wood void kvmppc_free_lpid(long lpid)
25276ba2a292SNicholas Piggin {
2528043cc4d7SScott Wood 	ida_free(&lpid_inuse, lpid);
2529043cc4d7SScott Wood }
25306ba2a292SNicholas Piggin EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
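
/*
 * Minimal in-kernel usage sketch of the LPID allocator (error handling
 * trimmed); VM-creation paths such as Book3S HV pair kvmppc_alloc_lpid()
 * with kvmppc_free_lpid() over the lifetime of a guest:
 *
 *	long lpid = kvmppc_alloc_lpid();
 *
 *	if (lpid < 0)
 *		return lpid;		// pool exhausted or out of memory
 *	kvm->arch.lpid = lpid;
 *	...
 *	kvmppc_free_lpid(kvm->arch.lpid);
 */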
2531043cc4d7SScott Wood 
25322ba9f0d8SAneesh Kumar K.V /* nr_lpids_param includes the host LPID */
2533043cc4d7SScott Wood void kvmppc_init_lpid(unsigned long nr_lpids_param)
2534478d6686SPaolo Bonzini {
2535faf01aefSAlexey Kardashevskiy 	nr_lpids = nr_lpids_param;
2536faf01aefSAlexey Kardashevskiy }
2537faf01aefSAlexey Kardashevskiy EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
2538faf01aefSAlexey Kardashevskiy 
2539faf01aefSAlexey Kardashevskiy EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);
2540faf01aefSAlexey Kardashevskiy 
2541faf01aefSAlexey Kardashevskiy void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
2542faf01aefSAlexey Kardashevskiy {
2543faf01aefSAlexey Kardashevskiy 	if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs)
2544faf01aefSAlexey Kardashevskiy 		vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry);
2545faf01aefSAlexey Kardashevskiy }
2546faf01aefSAlexey Kardashevskiy 
2547faf01aefSAlexey Kardashevskiy int kvm_arch_create_vm_debugfs(struct kvm *kvm)
2548 {
2549 	if (kvm->arch.kvm_ops->create_vm_debugfs)
2550 		kvm->arch.kvm_ops->create_vm_debugfs(kvm);
2551 	return 0;
2552 }
2553