xref: /openbmc/linux/arch/powerpc/kvm/powerpc.c (revision 6562c9ac)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  * Copyright IBM Corp. 2007
5  *
6  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
7  *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
8  */
9 
10 #include <linux/errno.h>
11 #include <linux/err.h>
12 #include <linux/kvm_host.h>
13 #include <linux/vmalloc.h>
14 #include <linux/hrtimer.h>
15 #include <linux/sched/signal.h>
16 #include <linux/fs.h>
17 #include <linux/slab.h>
18 #include <linux/file.h>
19 #include <linux/module.h>
20 #include <linux/irqbypass.h>
21 #include <linux/kvm_irqfd.h>
22 #include <linux/of.h>
23 #include <asm/cputable.h>
24 #include <linux/uaccess.h>
25 #include <asm/kvm_ppc.h>
26 #include <asm/cputhreads.h>
27 #include <asm/irqflags.h>
28 #include <asm/iommu.h>
29 #include <asm/switch_to.h>
30 #include <asm/xive.h>
31 #ifdef CONFIG_PPC_PSERIES
32 #include <asm/hvcall.h>
33 #include <asm/plpar_wrappers.h>
34 #endif
35 #include <asm/ultravisor.h>
36 #include <asm/setup.h>
37 
38 #include "timing.h"
39 #include "irq.h"
40 #include "../mm/mmu_decl.h"
41 
42 #define CREATE_TRACE_POINTS
43 #include "trace.h"
44 
45 struct kvmppc_ops *kvmppc_hv_ops;
46 EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
47 struct kvmppc_ops *kvmppc_pr_ops;
48 EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
49 
50 
51 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
52 {
53 	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
54 }
55 
56 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
57 {
58 	return kvm_arch_vcpu_runnable(vcpu);
59 }
60 
61 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
62 {
63 	return false;
64 }
65 
66 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
67 {
68 	return 1;
69 }
70 
71 /*
72  * Common checks before entering the guest world.  Call with interrupts
73  * disabled.
74  *
75  * returns:
76  *
77  * == 1 if we're ready to go into guest state
78  * <= 0 if we need to go back to the host with that return value
79  */
80 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
81 {
82 	int r;
83 
84 	WARN_ON(irqs_disabled());
85 	hard_irq_disable();
86 
87 	while (true) {
88 		if (need_resched()) {
89 			local_irq_enable();
90 			cond_resched();
91 			hard_irq_disable();
92 			continue;
93 		}
94 
95 		if (signal_pending(current)) {
96 			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
97 			vcpu->run->exit_reason = KVM_EXIT_INTR;
98 			r = -EINTR;
99 			break;
100 		}
101 
102 		vcpu->mode = IN_GUEST_MODE;
103 
104 		/*
105 		 * Reading vcpu->requests must happen after setting vcpu->mode,
106 		 * so we don't miss a request because the requester sees
107 		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
108 		 * before next entering the guest (and thus doesn't IPI).
109 		 * This also orders the write to mode from any reads
110 		 * to the page tables done while the VCPU is running.
111 		 * Please see the comment in kvm_flush_remote_tlbs.
112 		 */
113 		smp_mb();
114 
115 		if (kvm_request_pending(vcpu)) {
116 		/* Make sure we process requests in a preemptible context */
117 			local_irq_enable();
118 			trace_kvm_check_requests(vcpu);
119 			r = kvmppc_core_check_requests(vcpu);
120 			hard_irq_disable();
121 			if (r > 0)
122 				continue;
123 			break;
124 		}
125 
126 		if (kvmppc_core_prepare_to_enter(vcpu)) {
127 			/* interrupts got enabled in between, so we
128 			   are back at square 1 */
129 			continue;
130 		}
131 
132 		guest_enter_irqoff();
133 		return 1;
134 	}
135 
136 	/* return to host */
137 	local_irq_enable();
138 	return r;
139 }
140 EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
141 
142 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
143 static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
144 {
145 	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
146 	int i;
147 
148 	shared->sprg0 = swab64(shared->sprg0);
149 	shared->sprg1 = swab64(shared->sprg1);
150 	shared->sprg2 = swab64(shared->sprg2);
151 	shared->sprg3 = swab64(shared->sprg3);
152 	shared->srr0 = swab64(shared->srr0);
153 	shared->srr1 = swab64(shared->srr1);
154 	shared->dar = swab64(shared->dar);
155 	shared->msr = swab64(shared->msr);
156 	shared->dsisr = swab32(shared->dsisr);
157 	shared->int_pending = swab32(shared->int_pending);
158 	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
159 		shared->sr[i] = swab32(shared->sr[i]);
160 }
161 #endif
162 
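/*
 * Handle a KVM/ePAPR paravirtual hypercall made by the guest.  The
 * hypercall number is taken from r11 and up to four arguments from
 * r3-r6; the secondary return value is passed back in r4 and the
 * primary status is returned to the caller.
 */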
163 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
164 {
165 	int nr = kvmppc_get_gpr(vcpu, 11);
166 	int r;
167 	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
168 	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
169 	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
170 	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
171 	unsigned long r2 = 0;
172 
173 	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
174 		/* 32 bit mode */
175 		param1 &= 0xffffffff;
176 		param2 &= 0xffffffff;
177 		param3 &= 0xffffffff;
178 		param4 &= 0xffffffff;
179 	}
180 
181 	switch (nr) {
182 	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
183 	{
184 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
185 		/* Book3S can be little endian, so find out here */
186 		int shared_big_endian = true;
187 		if (vcpu->arch.intr_msr & MSR_LE)
188 			shared_big_endian = false;
189 		if (shared_big_endian != vcpu->arch.shared_big_endian)
190 			kvmppc_swab_shared(vcpu);
191 		vcpu->arch.shared_big_endian = shared_big_endian;
192 #endif
193 
194 		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
195 			/*
196 			 * Older versions of the Linux magic page code had
197 			 * a bug where they would map their trampoline code
198 			 * as NX. If that's the case, remove the !PR NX capability.
199 			 */
200 			vcpu->arch.disable_kernel_nx = true;
201 			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
202 		}
203 
204 		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
205 		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;
206 
207 #ifdef CONFIG_PPC_64K_PAGES
208 		/*
209 		 * Make sure our 4k magic page lies at the same offset within
210 		 * a 64k page in the guest as it does within the host's page.
211 		 */
212 		if ((vcpu->arch.magic_page_pa & 0xf000) !=
213 		    ((ulong)vcpu->arch.shared & 0xf000)) {
214 			void *old_shared = vcpu->arch.shared;
215 			ulong shared = (ulong)vcpu->arch.shared;
216 			void *new_shared;
217 
218 			shared &= PAGE_MASK;
219 			shared |= vcpu->arch.magic_page_pa & 0xf000;
220 			new_shared = (void *)shared;
221 			memcpy(new_shared, old_shared, 0x1000);
222 			vcpu->arch.shared = new_shared;
223 		}
224 #endif
225 
226 		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
227 
228 		r = EV_SUCCESS;
229 		break;
230 	}
231 	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
232 		r = EV_SUCCESS;
233 #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
234 		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
235 #endif
236 
237 		/* Second return value is in r4 */
238 		break;
239 	case EV_HCALL_TOKEN(EV_IDLE):
240 		r = EV_SUCCESS;
241 		kvm_vcpu_halt(vcpu);
242 		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
243 		break;
244 	default:
245 		r = EV_UNIMPLEMENTED;
246 		break;
247 	}
248 
249 	kvmppc_set_gpr(vcpu, 4, r2);
250 
251 	return r;
252 }
253 EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
254 
255 int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
256 {
257 	int r = false;
258 
259 	/* We have to know what CPU to virtualize */
260 	if (!vcpu->arch.pvr)
261 		goto out;
262 
263 	/* PAPR only works with book3s_64 */
264 	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
265 		goto out;
266 
267 	/* HV KVM can only do PAPR mode for now */
268 	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
269 		goto out;
270 
271 #ifdef CONFIG_KVM_BOOKE_HV
272 	if (!cpu_has_feature(CPU_FTR_EMB_HV))
273 		goto out;
274 #endif
275 
276 	r = true;
277 
278 out:
279 	vcpu->arch.sane = r;
280 	return r ? 0 : -EINVAL;
281 }
282 EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
283 
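/*
 * Emulate the load/store instruction that faulted on emulated MMIO and
 * translate the emulation result into a resume code, injecting a Data
 * Storage (or a Program interrupt on BookE) if the instruction could
 * not be emulated.
 */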
284 int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
285 {
286 	enum emulation_result er;
287 	int r;
288 
289 	er = kvmppc_emulate_loadstore(vcpu);
290 	switch (er) {
291 	case EMULATE_DONE:
292 		/* Future optimization: only reload non-volatiles if they were
293 		 * actually modified. */
294 		r = RESUME_GUEST_NV;
295 		break;
296 	case EMULATE_AGAIN:
297 		r = RESUME_GUEST;
298 		break;
299 	case EMULATE_DO_MMIO:
300 		vcpu->run->exit_reason = KVM_EXIT_MMIO;
301 		/* We must reload nonvolatiles because "update" load/store
302 		 * instructions modify register state. */
303 		/* Future optimization: only reload non-volatiles if they were
304 		 * actually modified. */
305 		r = RESUME_HOST_NV;
306 		break;
307 	case EMULATE_FAIL:
308 	{
309 		u32 last_inst;
310 
311 		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
312 		kvm_debug_ratelimited("Guest access to device memory using unsupported instruction (opcode: %#08x)\n",
313 				      last_inst);
314 
315 		/*
316 		 * Injecting a Data Storage here is a bit more
317 		 * accurate since the instruction that caused the
318 		 * access could still be a valid one.
319 		 */
320 		if (!IS_ENABLED(CONFIG_BOOKE)) {
321 			ulong dsisr = DSISR_BADACCESS;
322 
323 			if (vcpu->mmio_is_write)
324 				dsisr |= DSISR_ISSTORE;
325 
326 			kvmppc_core_queue_data_storage(vcpu, vcpu->arch.vaddr_accessed, dsisr);
327 		} else {
328 			/*
329 			 * BookE does not send a SIGBUS on a bad
330 			 * fault, so use a Program interrupt instead
331 			 * to avoid a fault loop.
332 			 */
333 			kvmppc_core_queue_program(vcpu, 0);
334 		}
335 
336 		r = RESUME_GUEST;
337 		break;
338 	}
339 	default:
340 		WARN_ON(1);
341 		r = RESUME_GUEST;
342 	}
343 
344 	return r;
345 }
346 EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
347 
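/*
 * Store 'size' bytes from 'ptr' to the guest effective address '*eaddr'.
 * Tries the subarch store_to_eaddr hook first, otherwise translates the
 * address and writes to guest memory, with a special case for the magic
 * (shared) page.  *eaddr is updated to the translated real address.
 */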
348 int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
349 	      bool data)
350 {
351 	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
352 	struct kvmppc_pte pte;
353 	int r = -EINVAL;
354 
355 	vcpu->stat.st++;
356 
357 	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
358 		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
359 							    size);
360 
361 	if ((!r) || (r == -EAGAIN))
362 		return r;
363 
364 	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
365 			 XLATE_WRITE, &pte);
366 	if (r < 0)
367 		return r;
368 
369 	*eaddr = pte.raddr;
370 
371 	if (!pte.may_write)
372 		return -EPERM;
373 
374 	/* Magic page override */
375 	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
376 	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
377 	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
378 		void *magic = vcpu->arch.shared;
379 		magic += pte.eaddr & 0xfff;
380 		memcpy(magic, ptr, size);
381 		return EMULATE_DONE;
382 	}
383 
384 	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
385 		return EMULATE_DO_MMIO;
386 
387 	return EMULATE_DONE;
388 }
389 EXPORT_SYMBOL_GPL(kvmppc_st);
390 
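/*
 * Load 'size' bytes from guest effective address '*eaddr' into 'ptr'.
 * The counterpart of kvmppc_st(): tries the subarch load_from_eaddr
 * hook first, then translates the address and reads guest memory,
 * again special-casing the magic (shared) page.
 */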
391 int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
392 		      bool data)
393 {
394 	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
395 	struct kvmppc_pte pte;
396 	int rc = -EINVAL;
397 
398 	vcpu->stat.ld++;
399 
400 	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
401 		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
402 							      size);
403 
404 	if ((!rc) || (rc == -EAGAIN))
405 		return rc;
406 
407 	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
408 			  XLATE_READ, &pte);
409 	if (rc)
410 		return rc;
411 
412 	*eaddr = pte.raddr;
413 
414 	if (!pte.may_read)
415 		return -EPERM;
416 
417 	if (!data && !pte.may_execute)
418 		return -ENOEXEC;
419 
420 	/* Magic page override */
421 	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
422 	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
423 	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
424 		void *magic = vcpu->arch.shared;
425 		magic += pte.eaddr & 0xfff;
426 		memcpy(ptr, magic, size);
427 		return EMULATE_DONE;
428 	}
429 
430 	kvm_vcpu_srcu_read_lock(vcpu);
431 	rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
432 	kvm_vcpu_srcu_read_unlock(vcpu);
433 	if (rc)
434 		return EMULATE_DO_MMIO;
435 
436 	return EMULATE_DONE;
437 }
438 EXPORT_SYMBOL_GPL(kvmppc_ld);
439 
440 int kvm_arch_hardware_enable(void)
441 {
442 	return 0;
443 }
444 
445 int kvm_arch_hardware_setup(void *opaque)
446 {
447 	return 0;
448 }
449 
450 int kvm_arch_check_processor_compat(void *opaque)
451 {
452 	return kvmppc_core_check_processor_compat();
453 }
454 
455 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
456 {
457 	struct kvmppc_ops *kvm_ops = NULL;
458 	int r;
459 
460 	/*
461 	 * If both HV and PR are enabled, the default is HV.
462 	 */
463 	if (type == 0) {
464 		if (kvmppc_hv_ops)
465 			kvm_ops = kvmppc_hv_ops;
466 		else
467 			kvm_ops = kvmppc_pr_ops;
468 		if (!kvm_ops)
469 			goto err_out;
470 	} else	if (type == KVM_VM_PPC_HV) {
471 		if (!kvmppc_hv_ops)
472 			goto err_out;
473 		kvm_ops = kvmppc_hv_ops;
474 	} else if (type == KVM_VM_PPC_PR) {
475 		if (!kvmppc_pr_ops)
476 			goto err_out;
477 		kvm_ops = kvmppc_pr_ops;
478 	} else
479 		goto err_out;
480 
481 	if (!try_module_get(kvm_ops->owner))
482 		return -ENOENT;
483 
484 	kvm->arch.kvm_ops = kvm_ops;
485 	r = kvmppc_core_init_vm(kvm);
486 	if (r)
487 		module_put(kvm_ops->owner);
488 	return r;
489 err_out:
490 	return -EINVAL;
491 }
492 
493 void kvm_arch_destroy_vm(struct kvm *kvm)
494 {
495 #ifdef CONFIG_KVM_XICS
496 	/*
497 	 * We call kick_all_cpus_sync() to ensure that all
498 	 * CPUs have executed any pending IPIs before we
499 	 * continue and free the VCPU structures below.
500 	 */
501 	if (is_kvmppc_hv_enabled(kvm))
502 		kick_all_cpus_sync();
503 #endif
504 
505 	kvm_destroy_vcpus(kvm);
506 
507 	mutex_lock(&kvm->lock);
508 
509 	kvmppc_core_destroy_vm(kvm);
510 
511 	mutex_unlock(&kvm->lock);
512 
513 	/* drop the module reference */
514 	module_put(kvm->arch.kvm_ops->owner);
515 }
516 
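/*
 * Report whether a KVM capability is supported.  When no VM has been
 * created yet (kvm == NULL), guess HV vs. PR from whether the HV module
 * is loaded; otherwise use the VM's actual type.
 */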
517 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
518 {
519 	int r;
520 	/* Assume we're using HV mode when the HV module is loaded */
521 	int hv_enabled = kvmppc_hv_ops ? 1 : 0;
522 
523 	if (kvm) {
524 		/*
525 		 * Hooray - we know which VM type we're running on. Depend on
526 		 * that rather than the guess above.
527 		 */
528 		hv_enabled = is_kvmppc_hv_enabled(kvm);
529 	}
530 
531 	switch (ext) {
532 #ifdef CONFIG_BOOKE
533 	case KVM_CAP_PPC_BOOKE_SREGS:
534 	case KVM_CAP_PPC_BOOKE_WATCHDOG:
535 	case KVM_CAP_PPC_EPR:
536 #else
537 	case KVM_CAP_PPC_SEGSTATE:
538 	case KVM_CAP_PPC_HIOR:
539 	case KVM_CAP_PPC_PAPR:
540 #endif
541 	case KVM_CAP_PPC_UNSET_IRQ:
542 	case KVM_CAP_PPC_IRQ_LEVEL:
543 	case KVM_CAP_ENABLE_CAP:
544 	case KVM_CAP_ONE_REG:
545 	case KVM_CAP_IOEVENTFD:
546 	case KVM_CAP_DEVICE_CTRL:
547 	case KVM_CAP_IMMEDIATE_EXIT:
548 	case KVM_CAP_SET_GUEST_DEBUG:
549 		r = 1;
550 		break;
551 	case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
552 	case KVM_CAP_PPC_PAIRED_SINGLES:
553 	case KVM_CAP_PPC_OSI:
554 	case KVM_CAP_PPC_GET_PVINFO:
555 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
556 	case KVM_CAP_SW_TLB:
557 #endif
558 		/* We support this only for PR */
559 		r = !hv_enabled;
560 		break;
561 #ifdef CONFIG_KVM_MPIC
562 	case KVM_CAP_IRQ_MPIC:
563 		r = 1;
564 		break;
565 #endif
566 
567 #ifdef CONFIG_PPC_BOOK3S_64
568 	case KVM_CAP_SPAPR_TCE:
569 	case KVM_CAP_SPAPR_TCE_64:
570 		r = 1;
571 		break;
572 	case KVM_CAP_SPAPR_TCE_VFIO:
573 		r = !!cpu_has_feature(CPU_FTR_HVMODE);
574 		break;
575 	case KVM_CAP_PPC_RTAS:
576 	case KVM_CAP_PPC_FIXUP_HCALL:
577 	case KVM_CAP_PPC_ENABLE_HCALL:
578 #ifdef CONFIG_KVM_XICS
579 	case KVM_CAP_IRQ_XICS:
580 #endif
581 	case KVM_CAP_PPC_GET_CPU_CHAR:
582 		r = 1;
583 		break;
584 #ifdef CONFIG_KVM_XIVE
585 	case KVM_CAP_PPC_IRQ_XIVE:
586 		/*
587 		 * We need XIVE to be enabled on the platform (which
588 		 * implies a POWER9 processor) and to be running on the
589 		 * PowerNV platform, as nested is not yet supported.
590 		 */
591 		r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
592 			kvmppc_xive_native_supported();
593 		break;
594 #endif
595 
596 	case KVM_CAP_PPC_ALLOC_HTAB:
597 		r = hv_enabled;
598 		break;
599 #endif /* CONFIG_PPC_BOOK3S_64 */
600 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
601 	case KVM_CAP_PPC_SMT:
602 		r = 0;
603 		if (kvm) {
604 			if (kvm->arch.emul_smt_mode > 1)
605 				r = kvm->arch.emul_smt_mode;
606 			else
607 				r = kvm->arch.smt_mode;
608 		} else if (hv_enabled) {
609 			if (cpu_has_feature(CPU_FTR_ARCH_300))
610 				r = 1;
611 			else
612 				r = threads_per_subcore;
613 		}
614 		break;
615 	case KVM_CAP_PPC_SMT_POSSIBLE:
616 		r = 1;
617 		if (hv_enabled) {
618 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
619 				r = ((threads_per_subcore << 1) - 1);
620 			else
621 				/* P9 can emulate dbells, so allow any mode */
622 				r = 8 | 4 | 2 | 1;
623 		}
624 		break;
625 	case KVM_CAP_PPC_RMA:
626 		r = 0;
627 		break;
628 	case KVM_CAP_PPC_HWRNG:
629 		r = kvmppc_hwrng_present();
630 		break;
631 	case KVM_CAP_PPC_MMU_RADIX:
632 		r = !!(hv_enabled && radix_enabled());
633 		break;
634 	case KVM_CAP_PPC_MMU_HASH_V3:
635 		r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible &&
636 		       kvmppc_hv_ops->hash_v3_possible());
637 		break;
638 	case KVM_CAP_PPC_NESTED_HV:
639 		r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
640 		       !kvmppc_hv_ops->enable_nested(NULL));
641 		break;
642 #endif
643 	case KVM_CAP_SYNC_MMU:
644 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
645 		r = hv_enabled;
646 #elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
647 		r = 1;
648 #else
649 		r = 0;
650 #endif
651 		break;
652 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
653 	case KVM_CAP_PPC_HTAB_FD:
654 		r = hv_enabled;
655 		break;
656 #endif
657 	case KVM_CAP_NR_VCPUS:
658 		/*
659 		 * Recommending a number of CPUs is somewhat arbitrary; we
660 		 * return the number of present CPUs for -HV (since a host
661 		 * will have secondary threads "offline"), and for other KVM
662 		 * implementations just count online CPUs.
663 		 */
664 		if (hv_enabled)
665 			r = min_t(unsigned int, num_present_cpus(), KVM_MAX_VCPUS);
666 		else
667 			r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
668 		break;
669 	case KVM_CAP_MAX_VCPUS:
670 		r = KVM_MAX_VCPUS;
671 		break;
672 	case KVM_CAP_MAX_VCPU_ID:
673 		r = KVM_MAX_VCPU_IDS;
674 		break;
675 #ifdef CONFIG_PPC_BOOK3S_64
676 	case KVM_CAP_PPC_GET_SMMU_INFO:
677 		r = 1;
678 		break;
679 	case KVM_CAP_SPAPR_MULTITCE:
680 		r = 1;
681 		break;
682 	case KVM_CAP_SPAPR_RESIZE_HPT:
683 		r = !!hv_enabled;
684 		break;
685 #endif
686 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
687 	case KVM_CAP_PPC_FWNMI:
688 		r = hv_enabled;
689 		break;
690 #endif
691 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
692 	case KVM_CAP_PPC_HTM:
693 		r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
694 		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
695 		break;
696 #endif
697 #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
698 	case KVM_CAP_PPC_SECURE_GUEST:
699 		r = hv_enabled && kvmppc_hv_ops->enable_svm &&
700 			!kvmppc_hv_ops->enable_svm(NULL);
701 		break;
702 	case KVM_CAP_PPC_DAWR1:
703 		r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 &&
704 		       !kvmppc_hv_ops->enable_dawr1(NULL));
705 		break;
706 	case KVM_CAP_PPC_RPT_INVALIDATE:
707 		r = 1;
708 		break;
709 #endif
710 	case KVM_CAP_PPC_AIL_MODE_3:
711 		r = 0;
712 		/*
713 		 * KVM PR, POWER7, and some POWER9s don't support AIL=3 mode.
714 		 * The POWER9s can support it if the guest runs in hash mode,
715 		 * but QEMU doesn't necessarily query the capability in time.
716 		 */
717 		if (hv_enabled) {
718 			if (kvmhv_on_pseries()) {
719 				if (pseries_reloc_on_exception())
720 					r = 1;
721 			} else if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
722 				  !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
723 				r = 1;
724 			}
725 		}
726 		break;
727 	default:
728 		r = 0;
729 		break;
730 	}
731 	return r;
732 
733 }
734 
735 long kvm_arch_dev_ioctl(struct file *filp,
736                         unsigned int ioctl, unsigned long arg)
737 {
738 	return -EINVAL;
739 }
740 
741 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
742 {
743 	kvmppc_core_free_memslot(kvm, slot);
744 }
745 
746 int kvm_arch_prepare_memory_region(struct kvm *kvm,
747 				   const struct kvm_memory_slot *old,
748 				   struct kvm_memory_slot *new,
749 				   enum kvm_mr_change change)
750 {
751 	return kvmppc_core_prepare_memory_region(kvm, old, new, change);
752 }
753 
754 void kvm_arch_commit_memory_region(struct kvm *kvm,
755 				   struct kvm_memory_slot *old,
756 				   const struct kvm_memory_slot *new,
757 				   enum kvm_mr_change change)
758 {
759 	kvmppc_core_commit_memory_region(kvm, old, new, change);
760 }
761 
762 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
763 				   struct kvm_memory_slot *slot)
764 {
765 	kvmppc_core_flush_memslot(kvm, slot);
766 }
767 
768 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
769 {
770 	return 0;
771 }
772 
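/*
 * hrtimer callback for the emulated guest decrementer; it simply
 * forwards to kvmppc_decrementer_func() for the owning vcpu.
 */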
773 static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
774 {
775 	struct kvm_vcpu *vcpu;
776 
777 	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
778 	kvmppc_decrementer_func(vcpu);
779 
780 	return HRTIMER_NORESTART;
781 }
782 
783 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
784 {
785 	int err;
786 
787 	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
788 	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
789 	vcpu->arch.dec_expires = get_tb();
790 
791 #ifdef CONFIG_KVM_EXIT_TIMING
792 	mutex_init(&vcpu->arch.exit_timing_lock);
793 #endif
794 	err = kvmppc_subarch_vcpu_init(vcpu);
795 	if (err)
796 		return err;
797 
798 	err = kvmppc_core_vcpu_create(vcpu);
799 	if (err)
800 		goto out_vcpu_uninit;
801 
802 	rcuwait_init(&vcpu->arch.wait);
803 	vcpu->arch.waitp = &vcpu->arch.wait;
804 	return 0;
805 
806 out_vcpu_uninit:
807 	kvmppc_subarch_vcpu_uninit(vcpu);
808 	return err;
809 }
810 
811 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
812 {
813 }
814 
815 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
816 {
817 	/* Make sure we're not using the vcpu anymore */
818 	hrtimer_cancel(&vcpu->arch.dec_timer);
819 
820 	switch (vcpu->arch.irq_type) {
821 	case KVMPPC_IRQ_MPIC:
822 		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
823 		break;
824 	case KVMPPC_IRQ_XICS:
825 		if (xics_on_xive())
826 			kvmppc_xive_cleanup_vcpu(vcpu);
827 		else
828 			kvmppc_xics_free_icp(vcpu);
829 		break;
830 	case KVMPPC_IRQ_XIVE:
831 		kvmppc_xive_native_cleanup_vcpu(vcpu);
832 		break;
833 	}
834 
835 	kvmppc_core_vcpu_free(vcpu);
836 
837 	kvmppc_subarch_vcpu_uninit(vcpu);
838 }
839 
840 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
841 {
842 	return kvmppc_core_pending_dec(vcpu);
843 }
844 
845 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
846 {
847 #ifdef CONFIG_BOOKE
848 	/*
849 	 * vrsave (formerly usprg0) isn't used by Linux, but may
850 	 * be used by the guest.
851 	 *
852 	 * On non-booke this is associated with Altivec and
853 	 * is handled by code in book3s.c.
854 	 */
855 	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
856 #endif
857 	kvmppc_core_vcpu_load(vcpu, cpu);
858 }
859 
860 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
861 {
862 	kvmppc_core_vcpu_put(vcpu);
863 #ifdef CONFIG_BOOKE
864 	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
865 #endif
866 }
867 
868 /*
869  * irq_bypass_add_producer and irq_bypass_del_producer are only
870  * useful if the architecture supports PCI passthrough.
871  * irq_bypass_stop and irq_bypass_start are not needed and so
872  * kvm_ops are not defined for them.
873  */
874 bool kvm_arch_has_irq_bypass(void)
875 {
876 	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
877 		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
878 }
879 
880 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
881 				     struct irq_bypass_producer *prod)
882 {
883 	struct kvm_kernel_irqfd *irqfd =
884 		container_of(cons, struct kvm_kernel_irqfd, consumer);
885 	struct kvm *kvm = irqfd->kvm;
886 
887 	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
888 		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);
889 
890 	return 0;
891 }
892 
893 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
894 				      struct irq_bypass_producer *prod)
895 {
896 	struct kvm_kernel_irqfd *irqfd =
897 		container_of(cons, struct kvm_kernel_irqfd, consumer);
898 	struct kvm *kvm = irqfd->kvm;
899 
900 	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
901 		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
902 }
903 
904 #ifdef CONFIG_VSX
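/*
 * Map a VSX doubleword index (0 or 1) to its offset within the
 * in-memory register image; the mapping depends on host endianness.
 * Returns -1 for an invalid index.
 */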
905 static inline int kvmppc_get_vsr_dword_offset(int index)
906 {
907 	int offset;
908 
909 	if ((index != 0) && (index != 1))
910 		return -1;
911 
912 #ifdef __BIG_ENDIAN
913 	offset =  index;
914 #else
915 	offset = 1 - index;
916 #endif
917 
918 	return offset;
919 }
920 
921 static inline int kvmppc_get_vsr_word_offset(int index)
922 {
923 	int offset;
924 
925 	if ((index > 3) || (index < 0))
926 		return -1;
927 
928 #ifdef __BIG_ENDIAN
929 	offset = index;
930 #else
931 	offset = 3 - index;
932 #endif
933 	return offset;
934 }
935 
936 static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
937 	u64 gpr)
938 {
939 	union kvmppc_one_reg val;
940 	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
941 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
942 
943 	if (offset == -1)
944 		return;
945 
946 	if (index >= 32) {
947 		val.vval = VCPU_VSX_VR(vcpu, index - 32);
948 		val.vsxval[offset] = gpr;
949 		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
950 	} else {
951 		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
952 	}
953 }
954 
955 static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
956 	u64 gpr)
957 {
958 	union kvmppc_one_reg val;
959 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
960 
961 	if (index >= 32) {
962 		val.vval = VCPU_VSX_VR(vcpu, index - 32);
963 		val.vsxval[0] = gpr;
964 		val.vsxval[1] = gpr;
965 		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
966 	} else {
967 		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
968 		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
969 	}
970 }
971 
972 static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
973 	u32 gpr)
974 {
975 	union kvmppc_one_reg val;
976 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
977 
978 	if (index >= 32) {
979 		val.vsx32val[0] = gpr;
980 		val.vsx32val[1] = gpr;
981 		val.vsx32val[2] = gpr;
982 		val.vsx32val[3] = gpr;
983 		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
984 	} else {
985 		val.vsx32val[0] = gpr;
986 		val.vsx32val[1] = gpr;
987 		VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
988 		VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
989 	}
990 }
991 
992 static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
993 	u32 gpr32)
994 {
995 	union kvmppc_one_reg val;
996 	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
997 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
998 	int dword_offset, word_offset;
999 
1000 	if (offset == -1)
1001 		return;
1002 
1003 	if (index >= 32) {
1004 		val.vval = VCPU_VSX_VR(vcpu, index - 32);
1005 		val.vsx32val[offset] = gpr32;
1006 		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
1007 	} else {
1008 		dword_offset = offset / 2;
1009 		word_offset = offset % 2;
1010 		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
1011 		val.vsx32val[word_offset] = gpr32;
1012 		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
1013 	}
1014 }
1015 #endif /* CONFIG_VSX */
1016 
1017 #ifdef CONFIG_ALTIVEC
1018 static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
1019 		int index, int element_size)
1020 {
1021 	int offset;
1022 	int elts = sizeof(vector128)/element_size;
1023 
1024 	if ((index < 0) || (index >= elts))
1025 		return -1;
1026 
1027 	if (kvmppc_need_byteswap(vcpu))
1028 		offset = elts - index - 1;
1029 	else
1030 		offset = index;
1031 
1032 	return offset;
1033 }
1034 
1035 static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
1036 		int index)
1037 {
1038 	return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
1039 }
1040 
1041 static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
1042 		int index)
1043 {
1044 	return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
1045 }
1046 
1047 static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
1048 		int index)
1049 {
1050 	return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
1051 }
1052 
1053 static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
1054 		int index)
1055 {
1056 	return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
1057 }
1058 
1059 
1060 static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
1061 	u64 gpr)
1062 {
1063 	union kvmppc_one_reg val;
1064 	int offset = kvmppc_get_vmx_dword_offset(vcpu,
1065 			vcpu->arch.mmio_vmx_offset);
1066 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1067 
1068 	if (offset == -1)
1069 		return;
1070 
1071 	val.vval = VCPU_VSX_VR(vcpu, index);
1072 	val.vsxval[offset] = gpr;
1073 	VCPU_VSX_VR(vcpu, index) = val.vval;
1074 }
1075 
1076 static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
1077 	u32 gpr32)
1078 {
1079 	union kvmppc_one_reg val;
1080 	int offset = kvmppc_get_vmx_word_offset(vcpu,
1081 			vcpu->arch.mmio_vmx_offset);
1082 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1083 
1084 	if (offset == -1)
1085 		return;
1086 
1087 	val.vval = VCPU_VSX_VR(vcpu, index);
1088 	val.vsx32val[offset] = gpr32;
1089 	VCPU_VSX_VR(vcpu, index) = val.vval;
1090 }
1091 
1092 static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
1093 	u16 gpr16)
1094 {
1095 	union kvmppc_one_reg val;
1096 	int offset = kvmppc_get_vmx_hword_offset(vcpu,
1097 			vcpu->arch.mmio_vmx_offset);
1098 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1099 
1100 	if (offset == -1)
1101 		return;
1102 
1103 	val.vval = VCPU_VSX_VR(vcpu, index);
1104 	val.vsx16val[offset] = gpr16;
1105 	VCPU_VSX_VR(vcpu, index) = val.vval;
1106 }
1107 
1108 static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
1109 	u8 gpr8)
1110 {
1111 	union kvmppc_one_reg val;
1112 	int offset = kvmppc_get_vmx_byte_offset(vcpu,
1113 			vcpu->arch.mmio_vmx_offset);
1114 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1115 
1116 	if (offset == -1)
1117 		return;
1118 
1119 	val.vval = VCPU_VSX_VR(vcpu, index);
1120 	val.vsx8val[offset] = gpr8;
1121 	VCPU_VSX_VR(vcpu, index) = val.vval;
1122 }
1123 #endif /* CONFIG_ALTIVEC */
1124 
1125 #ifdef CONFIG_PPC_FPU
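/*
 * Convert between single- and double-precision float formats by
 * bouncing the value through fr0: lfs widens on load and stfs narrows
 * on store, so the FPU performs the conversion for us.
 */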
1126 static inline u64 sp_to_dp(u32 fprs)
1127 {
1128 	u64 fprd;
1129 
1130 	preempt_disable();
1131 	enable_kernel_fp();
1132 	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m<>" (fprd) : "m<>" (fprs)
1133 	     : "fr0");
1134 	preempt_enable();
1135 	return fprd;
1136 }
1137 
1138 static inline u32 dp_to_sp(u64 fprd)
1139 {
1140 	u32 fprs;
1141 
1142 	preempt_disable();
1143 	enable_kernel_fp();
1144 	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m<>" (fprs) : "m<>" (fprd)
1145 	     : "fr0");
1146 	preempt_enable();
1147 	return fprs;
1148 }
1149 
1150 #else
1151 #define sp_to_dp(x)	(x)
1152 #define dp_to_sp(x)	(x)
1153 #endif /* CONFIG_PPC_FPU */
1154 
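/*
 * Complete an MMIO load: pick up the data that userspace (or the
 * in-kernel io bus) placed in run->mmio.data, byteswap, extend and/or
 * convert it as requested, and write it to the destination register
 * (GPR, FPR, QPR, VSX or VMX).
 */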
1155 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
1156 {
1157 	struct kvm_run *run = vcpu->run;
1158 	u64 gpr;
1159 
1160 	if (run->mmio.len > sizeof(gpr))
1161 		return;
1162 
1163 	if (!vcpu->arch.mmio_host_swabbed) {
1164 		switch (run->mmio.len) {
1165 		case 8: gpr = *(u64 *)run->mmio.data; break;
1166 		case 4: gpr = *(u32 *)run->mmio.data; break;
1167 		case 2: gpr = *(u16 *)run->mmio.data; break;
1168 		case 1: gpr = *(u8 *)run->mmio.data; break;
1169 		}
1170 	} else {
1171 		switch (run->mmio.len) {
1172 		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
1173 		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
1174 		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
1175 		case 1: gpr = *(u8 *)run->mmio.data; break;
1176 		}
1177 	}
1178 
1179 	/* conversion between single and double precision */
1180 	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
1181 		gpr = sp_to_dp(gpr);
1182 
1183 	if (vcpu->arch.mmio_sign_extend) {
1184 		switch (run->mmio.len) {
1185 #ifdef CONFIG_PPC64
1186 		case 4:
1187 			gpr = (s64)(s32)gpr;
1188 			break;
1189 #endif
1190 		case 2:
1191 			gpr = (s64)(s16)gpr;
1192 			break;
1193 		case 1:
1194 			gpr = (s64)(s8)gpr;
1195 			break;
1196 		}
1197 	}
1198 
1199 	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
1200 	case KVM_MMIO_REG_GPR:
1201 		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
1202 		break;
1203 	case KVM_MMIO_REG_FPR:
1204 		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1205 			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
1206 
1207 		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1208 		break;
1209 #ifdef CONFIG_PPC_BOOK3S
1210 	case KVM_MMIO_REG_QPR:
1211 		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1212 		break;
1213 	case KVM_MMIO_REG_FQPR:
1214 		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1215 		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1216 		break;
1217 #endif
1218 #ifdef CONFIG_VSX
1219 	case KVM_MMIO_REG_VSX:
1220 		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1221 			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);
1222 
1223 		if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
1224 			kvmppc_set_vsr_dword(vcpu, gpr);
1225 		else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
1226 			kvmppc_set_vsr_word(vcpu, gpr);
1227 		else if (vcpu->arch.mmio_copy_type ==
1228 				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
1229 			kvmppc_set_vsr_dword_dump(vcpu, gpr);
1230 		else if (vcpu->arch.mmio_copy_type ==
1231 				KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
1232 			kvmppc_set_vsr_word_dump(vcpu, gpr);
1233 		break;
1234 #endif
1235 #ifdef CONFIG_ALTIVEC
1236 	case KVM_MMIO_REG_VMX:
1237 		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1238 			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);
1239 
1240 		if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
1241 			kvmppc_set_vmx_dword(vcpu, gpr);
1242 		else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
1243 			kvmppc_set_vmx_word(vcpu, gpr);
1244 		else if (vcpu->arch.mmio_copy_type ==
1245 				KVMPPC_VMX_COPY_HWORD)
1246 			kvmppc_set_vmx_hword(vcpu, gpr);
1247 		else if (vcpu->arch.mmio_copy_type ==
1248 				KVMPPC_VMX_COPY_BYTE)
1249 			kvmppc_set_vmx_byte(vcpu, gpr);
1250 		break;
1251 #endif
1252 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1253 	case KVM_MMIO_REG_NESTED_GPR:
1254 		if (kvmppc_need_byteswap(vcpu))
1255 			gpr = swab64(gpr);
1256 		kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
1257 				     sizeof(gpr));
1258 		break;
1259 #endif
1260 	default:
1261 		BUG();
1262 	}
1263 }
1264 
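/*
 * Common MMIO load path: fill in run->mmio for the access and try the
 * in-kernel io bus first.  If that handles it, the load is completed
 * immediately; otherwise EMULATE_DO_MMIO is returned so the caller can
 * exit to userspace.
 */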
1265 static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
1266 				unsigned int rt, unsigned int bytes,
1267 				int is_default_endian, int sign_extend)
1268 {
1269 	struct kvm_run *run = vcpu->run;
1270 	int idx, ret;
1271 	bool host_swabbed;
1272 
1273 	/* Pity C doesn't have a logical XOR operator */
1274 	if (kvmppc_need_byteswap(vcpu)) {
1275 		host_swabbed = is_default_endian;
1276 	} else {
1277 		host_swabbed = !is_default_endian;
1278 	}
1279 
1280 	if (bytes > sizeof(run->mmio.data))
1281 		return EMULATE_FAIL;
1282 
1283 	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1284 	run->mmio.len = bytes;
1285 	run->mmio.is_write = 0;
1286 
1287 	vcpu->arch.io_gpr = rt;
1288 	vcpu->arch.mmio_host_swabbed = host_swabbed;
1289 	vcpu->mmio_needed = 1;
1290 	vcpu->mmio_is_write = 0;
1291 	vcpu->arch.mmio_sign_extend = sign_extend;
1292 
1293 	idx = srcu_read_lock(&vcpu->kvm->srcu);
1294 
1295 	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1296 			      bytes, &run->mmio.data);
1297 
1298 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
1299 
1300 	if (!ret) {
1301 		kvmppc_complete_mmio_load(vcpu);
1302 		vcpu->mmio_needed = 0;
1303 		return EMULATE_DONE;
1304 	}
1305 
1306 	return EMULATE_DO_MMIO;
1307 }
1308 
1309 int kvmppc_handle_load(struct kvm_vcpu *vcpu,
1310 		       unsigned int rt, unsigned int bytes,
1311 		       int is_default_endian)
1312 {
1313 	return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
1314 }
1315 EXPORT_SYMBOL_GPL(kvmppc_handle_load);
1316 
1317 /* Same as above, but sign extends */
1318 int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
1319 			unsigned int rt, unsigned int bytes,
1320 			int is_default_endian)
1321 {
1322 	return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
1323 }
1324 
1325 #ifdef CONFIG_VSX
1326 int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
1327 			unsigned int rt, unsigned int bytes,
1328 			int is_default_endian, int mmio_sign_extend)
1329 {
1330 	enum emulation_result emulated = EMULATE_DONE;
1331 
1332 	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
1333 	if (vcpu->arch.mmio_vsx_copy_nums > 4)
1334 		return EMULATE_FAIL;
1335 
1336 	while (vcpu->arch.mmio_vsx_copy_nums) {
1337 		emulated = __kvmppc_handle_load(vcpu, rt, bytes,
1338 			is_default_endian, mmio_sign_extend);
1339 
1340 		if (emulated != EMULATE_DONE)
1341 			break;
1342 
1343 		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1344 
1345 		vcpu->arch.mmio_vsx_copy_nums--;
1346 		vcpu->arch.mmio_vsx_offset++;
1347 	}
1348 	return emulated;
1349 }
1350 #endif /* CONFIG_VSX */
1351 
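/*
 * Emulate an MMIO store: place the (possibly converted and byteswapped)
 * value in run->mmio.data and try the in-kernel io bus; if nothing
 * claims the access, return EMULATE_DO_MMIO so the caller exits to
 * userspace.
 */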
1352 int kvmppc_handle_store(struct kvm_vcpu *vcpu,
1353 			u64 val, unsigned int bytes, int is_default_endian)
1354 {
1355 	struct kvm_run *run = vcpu->run;
1356 	void *data = run->mmio.data;
1357 	int idx, ret;
1358 	bool host_swabbed;
1359 
1360 	/* Pity C doesn't have a logical XOR operator */
1361 	if (kvmppc_need_byteswap(vcpu)) {
1362 		host_swabbed = is_default_endian;
1363 	} else {
1364 		host_swabbed = !is_default_endian;
1365 	}
1366 
1367 	if (bytes > sizeof(run->mmio.data))
1368 		return EMULATE_FAIL;
1369 
1370 	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1371 	run->mmio.len = bytes;
1372 	run->mmio.is_write = 1;
1373 	vcpu->mmio_needed = 1;
1374 	vcpu->mmio_is_write = 1;
1375 
1376 	if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
1377 		val = dp_to_sp(val);
1378 
1379 	/* Store the value in the lowest bytes of 'data'. */
1380 	if (!host_swabbed) {
1381 		switch (bytes) {
1382 		case 8: *(u64 *)data = val; break;
1383 		case 4: *(u32 *)data = val; break;
1384 		case 2: *(u16 *)data = val; break;
1385 		case 1: *(u8  *)data = val; break;
1386 		}
1387 	} else {
1388 		switch (bytes) {
1389 		case 8: *(u64 *)data = swab64(val); break;
1390 		case 4: *(u32 *)data = swab32(val); break;
1391 		case 2: *(u16 *)data = swab16(val); break;
1392 		case 1: *(u8  *)data = val; break;
1393 		}
1394 	}
1395 
1396 	idx = srcu_read_lock(&vcpu->kvm->srcu);
1397 
1398 	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1399 			       bytes, &run->mmio.data);
1400 
1401 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
1402 
1403 	if (!ret) {
1404 		vcpu->mmio_needed = 0;
1405 		return EMULATE_DONE;
1406 	}
1407 
1408 	return EMULATE_DO_MMIO;
1409 }
1410 EXPORT_SYMBOL_GPL(kvmppc_handle_store);
1411 
1412 #ifdef CONFIG_VSX
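/*
 * Fetch the source data for a VSX MMIO store from either an FPR
 * (rs < 32) or a VR (rs >= 32), honouring the current copy type and
 * element offset.  Returns -1 if the offset or copy type is invalid.
 */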
1413 static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
1414 {
1415 	u32 dword_offset, word_offset;
1416 	union kvmppc_one_reg reg;
1417 	int vsx_offset = 0;
1418 	int copy_type = vcpu->arch.mmio_copy_type;
1419 	int result = 0;
1420 
1421 	switch (copy_type) {
1422 	case KVMPPC_VSX_COPY_DWORD:
1423 		vsx_offset =
1424 			kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
1425 
1426 		if (vsx_offset == -1) {
1427 			result = -1;
1428 			break;
1429 		}
1430 
1431 		if (rs < 32) {
1432 			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
1433 		} else {
1434 			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
1435 			*val = reg.vsxval[vsx_offset];
1436 		}
1437 		break;
1438 
1439 	case KVMPPC_VSX_COPY_WORD:
1440 		vsx_offset =
1441 			kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
1442 
1443 		if (vsx_offset == -1) {
1444 			result = -1;
1445 			break;
1446 		}
1447 
1448 		if (rs < 32) {
1449 			dword_offset = vsx_offset / 2;
1450 			word_offset = vsx_offset % 2;
1451 			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
1452 			*val = reg.vsx32val[word_offset];
1453 		} else {
1454 			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
1455 			*val = reg.vsx32val[vsx_offset];
1456 		}
1457 		break;
1458 
1459 	default:
1460 		result = -1;
1461 		break;
1462 	}
1463 
1464 	return result;
1465 }
1466 
1467 int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
1468 			int rs, unsigned int bytes, int is_default_endian)
1469 {
1470 	u64 val;
1471 	enum emulation_result emulated = EMULATE_DONE;
1472 
1473 	vcpu->arch.io_gpr = rs;
1474 
1475 	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
1476 	if (vcpu->arch.mmio_vsx_copy_nums > 4)
1477 		return EMULATE_FAIL;
1478 
1479 	while (vcpu->arch.mmio_vsx_copy_nums) {
1480 		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
1481 			return EMULATE_FAIL;
1482 
1483 		emulated = kvmppc_handle_store(vcpu,
1484 			 val, bytes, is_default_endian);
1485 
1486 		if (emulated != EMULATE_DONE)
1487 			break;
1488 
1489 		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1490 
1491 		vcpu->arch.mmio_vsx_copy_nums--;
1492 		vcpu->arch.mmio_vsx_offset++;
1493 	}
1494 
1495 	return emulated;
1496 }
1497 
1498 static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
1499 {
1500 	struct kvm_run *run = vcpu->run;
1501 	enum emulation_result emulated = EMULATE_FAIL;
1502 	int r;
1503 
1504 	vcpu->arch.paddr_accessed += run->mmio.len;
1505 
1506 	if (!vcpu->mmio_is_write) {
1507 		emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
1508 			 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
1509 	} else {
1510 		emulated = kvmppc_handle_vsx_store(vcpu,
1511 			 vcpu->arch.io_gpr, run->mmio.len, 1);
1512 	}
1513 
1514 	switch (emulated) {
1515 	case EMULATE_DO_MMIO:
1516 		run->exit_reason = KVM_EXIT_MMIO;
1517 		r = RESUME_HOST;
1518 		break;
1519 	case EMULATE_FAIL:
1520 		pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
1521 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1522 		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1523 		r = RESUME_HOST;
1524 		break;
1525 	default:
1526 		r = RESUME_GUEST;
1527 		break;
1528 	}
1529 	return r;
1530 }
1531 #endif /* CONFIG_VSX */
1532 
1533 #ifdef CONFIG_ALTIVEC
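/*
 * Repeat the common MMIO load path for each element of an Altivec (VMX)
 * load; at most two copies (mmio_vmx_copy_nums <= 2) are handled.
 */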
1534 int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
1535 		unsigned int rt, unsigned int bytes, int is_default_endian)
1536 {
1537 	enum emulation_result emulated = EMULATE_DONE;
1538 
1539 	if (vcpu->arch.mmio_vmx_copy_nums > 2)
1540 		return EMULATE_FAIL;
1541 
1542 	while (vcpu->arch.mmio_vmx_copy_nums) {
1543 		emulated = __kvmppc_handle_load(vcpu, rt, bytes,
1544 				is_default_endian, 0);
1545 
1546 		if (emulated != EMULATE_DONE)
1547 			break;
1548 
1549 		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1550 		vcpu->arch.mmio_vmx_copy_nums--;
1551 		vcpu->arch.mmio_vmx_offset++;
1552 	}
1553 
1554 	return emulated;
1555 }
1556 
1557 static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
1558 {
1559 	union kvmppc_one_reg reg;
1560 	int vmx_offset = 0;
1561 	int result = 0;
1562 
1563 	vmx_offset =
1564 		kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1565 
1566 	if (vmx_offset == -1)
1567 		return -1;
1568 
1569 	reg.vval = VCPU_VSX_VR(vcpu, index);
1570 	*val = reg.vsxval[vmx_offset];
1571 
1572 	return result;
1573 }
1574 
1575 static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
1576 {
1577 	union kvmppc_one_reg reg;
1578 	int vmx_offset = 0;
1579 	int result = 0;
1580 
1581 	vmx_offset =
1582 		kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1583 
1584 	if (vmx_offset == -1)
1585 		return -1;
1586 
1587 	reg.vval = VCPU_VSX_VR(vcpu, index);
1588 	*val = reg.vsx32val[vmx_offset];
1589 
1590 	return result;
1591 }
1592 
1593 static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
1594 {
1595 	union kvmppc_one_reg reg;
1596 	int vmx_offset = 0;
1597 	int result = 0;
1598 
1599 	vmx_offset =
1600 		kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1601 
1602 	if (vmx_offset == -1)
1603 		return -1;
1604 
1605 	reg.vval = VCPU_VSX_VR(vcpu, index);
1606 	*val = reg.vsx16val[vmx_offset];
1607 
1608 	return result;
1609 }
1610 
1611 static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
1612 {
1613 	union kvmppc_one_reg reg;
1614 	int vmx_offset = 0;
1615 	int result = 0;
1616 
1617 	vmx_offset =
1618 		kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1619 
1620 	if (vmx_offset == -1)
1621 		return -1;
1622 
1623 	reg.vval = VCPU_VSX_VR(vcpu, index);
1624 	*val = reg.vsx8val[vmx_offset];
1625 
1626 	return result;
1627 }
1628 
1629 int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
1630 		unsigned int rs, unsigned int bytes, int is_default_endian)
1631 {
1632 	u64 val = 0;
1633 	unsigned int index = rs & KVM_MMIO_REG_MASK;
1634 	enum emulation_result emulated = EMULATE_DONE;
1635 
1636 	if (vcpu->arch.mmio_vmx_copy_nums > 2)
1637 		return EMULATE_FAIL;
1638 
1639 	vcpu->arch.io_gpr = rs;
1640 
1641 	while (vcpu->arch.mmio_vmx_copy_nums) {
1642 		switch (vcpu->arch.mmio_copy_type) {
1643 		case KVMPPC_VMX_COPY_DWORD:
1644 			if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
1645 				return EMULATE_FAIL;
1646 
1647 			break;
1648 		case KVMPPC_VMX_COPY_WORD:
1649 			if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
1650 				return EMULATE_FAIL;
1651 			break;
1652 		case KVMPPC_VMX_COPY_HWORD:
1653 			if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
1654 				return EMULATE_FAIL;
1655 			break;
1656 		case KVMPPC_VMX_COPY_BYTE:
1657 			if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
1658 				return EMULATE_FAIL;
1659 			break;
1660 		default:
1661 			return EMULATE_FAIL;
1662 		}
1663 
1664 		emulated = kvmppc_handle_store(vcpu, val, bytes,
1665 				is_default_endian);
1666 		if (emulated != EMULATE_DONE)
1667 			break;
1668 
1669 		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1670 		vcpu->arch.mmio_vmx_copy_nums--;
1671 		vcpu->arch.mmio_vmx_offset++;
1672 	}
1673 
1674 	return emulated;
1675 }
1676 
1677 static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
1678 {
1679 	struct kvm_run *run = vcpu->run;
1680 	enum emulation_result emulated = EMULATE_FAIL;
1681 	int r;
1682 
1683 	vcpu->arch.paddr_accessed += run->mmio.len;
1684 
1685 	if (!vcpu->mmio_is_write) {
1686 		emulated = kvmppc_handle_vmx_load(vcpu,
1687 				vcpu->arch.io_gpr, run->mmio.len, 1);
1688 	} else {
1689 		emulated = kvmppc_handle_vmx_store(vcpu,
1690 				vcpu->arch.io_gpr, run->mmio.len, 1);
1691 	}
1692 
1693 	switch (emulated) {
1694 	case EMULATE_DO_MMIO:
1695 		run->exit_reason = KVM_EXIT_MMIO;
1696 		r = RESUME_HOST;
1697 		break;
1698 	case EMULATE_FAIL:
1699 		pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
1700 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1701 		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1702 		r = RESUME_HOST;
1703 		break;
1704 	default:
1705 		r = RESUME_GUEST;
1706 		break;
1707 	}
1708 	return r;
1709 }
1710 #endif /* CONFIG_ALTIVEC */
1711 
1712 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1713 {
1714 	int r = 0;
1715 	union kvmppc_one_reg val;
1716 	int size;
1717 
1718 	size = one_reg_size(reg->id);
1719 	if (size > sizeof(val))
1720 		return -EINVAL;
1721 
1722 	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
1723 	if (r == -EINVAL) {
1724 		r = 0;
1725 		switch (reg->id) {
1726 #ifdef CONFIG_ALTIVEC
1727 		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1728 			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1729 				r = -ENXIO;
1730 				break;
1731 			}
1732 			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
1733 			break;
1734 		case KVM_REG_PPC_VSCR:
1735 			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1736 				r = -ENXIO;
1737 				break;
1738 			}
1739 			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
1740 			break;
1741 		case KVM_REG_PPC_VRSAVE:
1742 			val = get_reg_val(reg->id, vcpu->arch.vrsave);
1743 			break;
1744 #endif /* CONFIG_ALTIVEC */
1745 		default:
1746 			r = -EINVAL;
1747 			break;
1748 		}
1749 	}
1750 
1751 	if (r)
1752 		return r;
1753 
1754 	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
1755 		r = -EFAULT;
1756 
1757 	return r;
1758 }
1759 
1760 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1761 {
1762 	int r;
1763 	union kvmppc_one_reg val;
1764 	int size;
1765 
1766 	size = one_reg_size(reg->id);
1767 	if (size > sizeof(val))
1768 		return -EINVAL;
1769 
1770 	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
1771 		return -EFAULT;
1772 
1773 	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
1774 	if (r == -EINVAL) {
1775 		r = 0;
1776 		switch (reg->id) {
1777 #ifdef CONFIG_ALTIVEC
1778 		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1779 			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1780 				r = -ENXIO;
1781 				break;
1782 			}
1783 			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
1784 			break;
1785 		case KVM_REG_PPC_VSCR:
1786 			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1787 				r = -ENXIO;
1788 				break;
1789 			}
1790 			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
1791 			break;
1792 		case KVM_REG_PPC_VRSAVE:
1793 			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1794 				r = -ENXIO;
1795 				break;
1796 			}
1797 			vcpu->arch.vrsave = set_reg_val(reg->id, val);
1798 			break;
1799 #endif /* CONFIG_ALTIVEC */
1800 		default:
1801 			r = -EINVAL;
1802 			break;
1803 		}
1804 	}
1805 
1806 	return r;
1807 }
1808 
1809 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1810 {
1811 	struct kvm_run *run = vcpu->run;
1812 	int r;
1813 
1814 	vcpu_load(vcpu);
1815 
1816 	if (vcpu->mmio_needed) {
1817 		vcpu->mmio_needed = 0;
1818 		if (!vcpu->mmio_is_write)
1819 			kvmppc_complete_mmio_load(vcpu);
1820 #ifdef CONFIG_VSX
1821 		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1822 			vcpu->arch.mmio_vsx_copy_nums--;
1823 			vcpu->arch.mmio_vsx_offset++;
1824 		}
1825 
1826 		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1827 			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
1828 			if (r == RESUME_HOST) {
1829 				vcpu->mmio_needed = 1;
1830 				goto out;
1831 			}
1832 		}
1833 #endif
1834 #ifdef CONFIG_ALTIVEC
1835 		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1836 			vcpu->arch.mmio_vmx_copy_nums--;
1837 			vcpu->arch.mmio_vmx_offset++;
1838 		}
1839 
1840 		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1841 			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
1842 			if (r == RESUME_HOST) {
1843 				vcpu->mmio_needed = 1;
1844 				goto out;
1845 			}
1846 		}
1847 #endif
1848 	} else if (vcpu->arch.osi_needed) {
1849 		u64 *gprs = run->osi.gprs;
1850 		int i;
1851 
1852 		for (i = 0; i < 32; i++)
1853 			kvmppc_set_gpr(vcpu, i, gprs[i]);
1854 		vcpu->arch.osi_needed = 0;
1855 	} else if (vcpu->arch.hcall_needed) {
1856 		int i;
1857 
1858 		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
1859 		for (i = 0; i < 9; ++i)
1860 			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
1861 		vcpu->arch.hcall_needed = 0;
1862 #ifdef CONFIG_BOOKE
1863 	} else if (vcpu->arch.epr_needed) {
1864 		kvmppc_set_epr(vcpu, run->epr.epr);
1865 		vcpu->arch.epr_needed = 0;
1866 #endif
1867 	}
1868 
1869 	kvm_sigset_activate(vcpu);
1870 
1871 	if (run->immediate_exit)
1872 		r = -EINTR;
1873 	else
1874 		r = kvmppc_vcpu_run(vcpu);
1875 
1876 	kvm_sigset_deactivate(vcpu);
1877 
1878 #ifdef CONFIG_ALTIVEC
1879 out:
1880 #endif
1881 
1882 	/*
1883 	 * We're already returning to userspace, so don't pass the
1884 	 * RESUME_HOST flags along.
1885 	 */
1886 	if (r > 0)
1887 		r = 0;
1888 
1889 	vcpu_put(vcpu);
1890 	return r;
1891 }
1892 
1893 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1894 {
1895 	if (irq->irq == KVM_INTERRUPT_UNSET) {
1896 		kvmppc_core_dequeue_external(vcpu);
1897 		return 0;
1898 	}
1899 
1900 	kvmppc_core_queue_external(vcpu, irq);
1901 
1902 	kvm_vcpu_kick(vcpu);
1903 
1904 	return 0;
1905 }
1906 
1907 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1908 				     struct kvm_enable_cap *cap)
1909 {
1910 	int r;
1911 
1912 	if (cap->flags)
1913 		return -EINVAL;
1914 
1915 	switch (cap->cap) {
1916 	case KVM_CAP_PPC_OSI:
1917 		r = 0;
1918 		vcpu->arch.osi_enabled = true;
1919 		break;
1920 	case KVM_CAP_PPC_PAPR:
1921 		r = 0;
1922 		vcpu->arch.papr_enabled = true;
1923 		break;
1924 	case KVM_CAP_PPC_EPR:
1925 		r = 0;
1926 		if (cap->args[0])
1927 			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
1928 		else
1929 			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
1930 		break;
1931 #ifdef CONFIG_BOOKE
1932 	case KVM_CAP_PPC_BOOKE_WATCHDOG:
1933 		r = 0;
1934 		vcpu->arch.watchdog_enabled = true;
1935 		break;
1936 #endif
1937 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
1938 	case KVM_CAP_SW_TLB: {
1939 		struct kvm_config_tlb cfg;
1940 		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
1941 
1942 		r = -EFAULT;
1943 		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
1944 			break;
1945 
1946 		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
1947 		break;
1948 	}
1949 #endif
1950 #ifdef CONFIG_KVM_MPIC
1951 	case KVM_CAP_IRQ_MPIC: {
1952 		struct fd f;
1953 		struct kvm_device *dev;
1954 
1955 		r = -EBADF;
1956 		f = fdget(cap->args[0]);
1957 		if (!f.file)
1958 			break;
1959 
1960 		r = -EPERM;
1961 		dev = kvm_device_from_filp(f.file);
1962 		if (dev)
1963 			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
1964 
1965 		fdput(f);
1966 		break;
1967 	}
1968 #endif
1969 #ifdef CONFIG_KVM_XICS
1970 	case KVM_CAP_IRQ_XICS: {
1971 		struct fd f;
1972 		struct kvm_device *dev;
1973 
1974 		r = -EBADF;
1975 		f = fdget(cap->args[0]);
1976 		if (!f.file)
1977 			break;
1978 
1979 		r = -EPERM;
1980 		dev = kvm_device_from_filp(f.file);
1981 		if (dev) {
1982 			if (xics_on_xive())
1983 				r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
1984 			else
1985 				r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
1986 		}
1987 
1988 		fdput(f);
1989 		break;
1990 	}
1991 #endif /* CONFIG_KVM_XICS */
1992 #ifdef CONFIG_KVM_XIVE
1993 	case KVM_CAP_PPC_IRQ_XIVE: {
1994 		struct fd f;
1995 		struct kvm_device *dev;
1996 
1997 		r = -EBADF;
1998 		f = fdget(cap->args[0]);
1999 		if (!f.file)
2000 			break;
2001 
2002 		r = -ENXIO;
2003 		if (!xive_enabled())
2004 			break;
2005 
2006 		r = -EPERM;
2007 		dev = kvm_device_from_filp(f.file);
2008 		if (dev)
2009 			r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
2010 							    cap->args[1]);
2011 
2012 		fdput(f);
2013 		break;
2014 	}
2015 #endif /* CONFIG_KVM_XIVE */
2016 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
2017 	case KVM_CAP_PPC_FWNMI:
2018 		r = -EINVAL;
2019 		if (!is_kvmppc_hv_enabled(vcpu->kvm))
2020 			break;
2021 		r = 0;
2022 		vcpu->kvm->arch.fwnmi_enabled = true;
2023 		break;
2024 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
2025 	default:
2026 		r = -EINVAL;
2027 		break;
2028 	}
2029 
2030 	if (!r)
2031 		r = kvmppc_sanity_check(vcpu);
2032 
2033 	return r;
2034 }
2035 
2036 bool kvm_arch_intc_initialized(struct kvm *kvm)
2037 {
2038 #ifdef CONFIG_KVM_MPIC
2039 	if (kvm->arch.mpic)
2040 		return true;
2041 #endif
2042 #ifdef CONFIG_KVM_XICS
2043 	if (kvm->arch.xics || kvm->arch.xive)
2044 		return true;
2045 #endif
2046 	return false;
2047 }
2048 
2049 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2050                                     struct kvm_mp_state *mp_state)
2051 {
2052 	return -EINVAL;
2053 }
2054 
2055 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2056                                     struct kvm_mp_state *mp_state)
2057 {
2058 	return -EINVAL;
2059 }
2060 
2061 long kvm_arch_vcpu_async_ioctl(struct file *filp,
2062 			       unsigned int ioctl, unsigned long arg)
2063 {
2064 	struct kvm_vcpu *vcpu = filp->private_data;
2065 	void __user *argp = (void __user *)arg;
2066 
2067 	if (ioctl == KVM_INTERRUPT) {
2068 		struct kvm_interrupt irq;
2069 		if (copy_from_user(&irq, argp, sizeof(irq)))
2070 			return -EFAULT;
2071 		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2072 	}
2073 	return -ENOIOCTLCMD;
2074 }
2075 
2076 long kvm_arch_vcpu_ioctl(struct file *filp,
2077                          unsigned int ioctl, unsigned long arg)
2078 {
2079 	struct kvm_vcpu *vcpu = filp->private_data;
2080 	void __user *argp = (void __user *)arg;
2081 	long r;
2082 
2083 	switch (ioctl) {
2084 	case KVM_ENABLE_CAP:
2085 	{
2086 		struct kvm_enable_cap cap;
2087 		r = -EFAULT;
2088 		if (copy_from_user(&cap, argp, sizeof(cap)))
2089 			goto out;
2090 		vcpu_load(vcpu);
2091 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2092 		vcpu_put(vcpu);
2093 		break;
2094 	}
2095 
2096 	case KVM_SET_ONE_REG:
2097 	case KVM_GET_ONE_REG:
2098 	{
2099 		struct kvm_one_reg reg;
2100 		r = -EFAULT;
2101 		if (copy_from_user(&reg, argp, sizeof(reg)))
2102 			goto out;
2103 		if (ioctl == KVM_SET_ONE_REG)
2104 			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
2105 		else
2106 			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
2107 		break;
2108 	}
2109 
2110 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
2111 	case KVM_DIRTY_TLB: {
2112 		struct kvm_dirty_tlb dirty;
2113 		r = -EFAULT;
2114 		if (copy_from_user(&dirty, argp, sizeof(dirty)))
2115 			goto out;
2116 		vcpu_load(vcpu);
2117 		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
2118 		vcpu_put(vcpu);
2119 		break;
2120 	}
2121 #endif
2122 	default:
2123 		r = -EINVAL;
2124 	}
2125 
2126 out:
2127 	return r;
2128 }
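/*
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG above give userspace access to single
 * registers by id (the KVM_REG_PPC_* constants).  A rough example that
 * reads one 64-bit register, with the value buffer passed by address:
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_PPC_HIOR,
 *		.addr = (__u64)(unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */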
2129 
2130 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2131 {
2132 	return VM_FAULT_SIGBUS;
2133 }
2134 
2135 static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
2136 {
2137 	u32 inst_nop = 0x60000000;
2138 #ifdef CONFIG_KVM_BOOKE_HV
2139 	u32 inst_sc1 = 0x44000022;
2140 	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
2141 	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
2142 	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
2143 	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2144 #else
2145 	u32 inst_lis = 0x3c000000;
2146 	u32 inst_ori = 0x60000000;
2147 	u32 inst_sc = 0x44000002;
2148 	u32 inst_imm_mask = 0xffff;
2149 
2150 	/*
2151 	 * The hypercall to get into KVM from within guest context is as
2152 	 * follows:
2153 	 *
2154 	 *    lis r0, KVM_SC_MAGIC_R0@h
2155 	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
2156 	 *    sc
2157 	 *    nop
2158 	 */
2159 	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
2160 	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
2161 	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
2162 	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2163 #endif
2164 
2165 	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;
2166 
2167 	return 0;
2168 }
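/*
 * Userspace fetches the sequence above with the KVM_PPC_GET_PVINFO vm
 * ioctl and advertises it to the guest (e.g. as the ePAPR hypervisor
 * node's "hcall-instructions" property); pvinfo.flags additionally
 * reports KVM_PPC_PVINFO_FLAGS_EV_IDLE.  Roughly:
 *
 *	struct kvm_ppc_pvinfo pvinfo;
 *	if (!ioctl(vm_fd, KVM_PPC_GET_PVINFO, &pvinfo))
 *		memcpy(hcall_insns, pvinfo.hcall, sizeof(pvinfo.hcall));
 *
 * (hcall_insns is a placeholder for wherever the guest expects them.)
 */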
2169 
2170 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
2171 			  bool line_status)
2172 {
2173 	if (!irqchip_in_kernel(kvm))
2174 		return -ENXIO;
2175 
2176 	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
2177 					irq_event->irq, irq_event->level,
2178 					line_status);
2179 	return 0;
2180 }
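/*
 * This backs the KVM_IRQ_LINE vm ioctl and is only useful once an
 * in-kernel interrupt controller exists (hence the irqchip_in_kernel()
 * check).  Loosely, from userspace:
 *
 *	struct kvm_irq_level irq = { .irq = irq_num, .level = 1 };
 *	ioctl(vm_fd, KVM_IRQ_LINE, &irq);
 *
 * (irq_num is a placeholder for whatever pin the in-kernel irqchip
 * expects.)
 */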
2181 
2182 
2183 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
2184 			    struct kvm_enable_cap *cap)
2185 {
2186 	int r;
2187 
2188 	if (cap->flags)
2189 		return -EINVAL;
2190 
2191 	switch (cap->cap) {
2192 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
2193 	case KVM_CAP_PPC_ENABLE_HCALL: {
2194 		unsigned long hcall = cap->args[0];
2195 
2196 		r = -EINVAL;
2197 		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
2198 		    cap->args[1] > 1)
2199 			break;
2200 		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
2201 			break;
2202 		if (cap->args[1])
2203 			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
2204 		else
2205 			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
2206 		r = 0;
2207 		break;
2208 	}
2209 	case KVM_CAP_PPC_SMT: {
2210 		unsigned long mode = cap->args[0];
2211 		unsigned long flags = cap->args[1];
2212 
2213 		r = -EINVAL;
2214 		if (kvm->arch.kvm_ops->set_smt_mode)
2215 			r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
2216 		break;
2217 	}
2218 
2219 	case KVM_CAP_PPC_NESTED_HV:
2220 		r = -EINVAL;
2221 		if (!is_kvmppc_hv_enabled(kvm) ||
2222 		    !kvm->arch.kvm_ops->enable_nested)
2223 			break;
2224 		r = kvm->arch.kvm_ops->enable_nested(kvm);
2225 		break;
2226 #endif
2227 #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
2228 	case KVM_CAP_PPC_SECURE_GUEST:
2229 		r = -EINVAL;
2230 		if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
2231 			break;
2232 		r = kvm->arch.kvm_ops->enable_svm(kvm);
2233 		break;
2234 	case KVM_CAP_PPC_DAWR1:
2235 		r = -EINVAL;
2236 		if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1)
2237 			break;
2238 		r = kvm->arch.kvm_ops->enable_dawr1(kvm);
2239 		break;
2240 #endif
2241 	default:
2242 		r = -EINVAL;
2243 		break;
2244 	}
2245 
2246 	return r;
2247 }
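/*
 * VM-wide capabilities are enabled the same way as the per-vcpu ones,
 * just on the VM fd.  For KVM_CAP_PPC_ENABLE_HCALL, args[0] is the hcall
 * token and args[1] selects enable (1) or disable (0); roughly:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_PPC_ENABLE_HCALL,
 *		.args[0] = H_LOGICAL_CI_LOAD,
 *		.args[1] = 1,
 *	};
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 *
 * (H_LOGICAL_CI_LOAD is just an example token.)
 */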
2248 
2249 #ifdef CONFIG_PPC_BOOK3S_64
2250 /*
2251  * These functions check whether the underlying hardware is safe
2252  * against attacks based on observing the effects of speculatively
2253  * executed instructions, and whether it supplies instructions for
2254  * use in workarounds.  The information comes from firmware, either
2255  * via the device tree on powernv platforms or from an hcall on
2256  * pseries platforms.
2257  */
2258 #ifdef CONFIG_PPC_PSERIES
2259 static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2260 {
2261 	struct h_cpu_char_result c;
2262 	unsigned long rc;
2263 
2264 	if (!machine_is(pseries))
2265 		return -ENOTTY;
2266 
2267 	rc = plpar_get_cpu_characteristics(&c);
2268 	if (rc == H_SUCCESS) {
2269 		cp->character = c.character;
2270 		cp->behaviour = c.behaviour;
2271 		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2272 			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2273 			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2274 			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2275 			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2276 			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
2277 			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
2278 			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
2279 			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2280 		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2281 			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2282 			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
2283 			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2284 	}
2285 	return 0;
2286 }
2287 #else
2288 static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2289 {
2290 	return -ENOTTY;
2291 }
2292 #endif
2293 
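/*
 * Report whether the firmware feature node /ibm,opal/fw-features/<name>
 * carries the given <state> property; e.g.
 * have_fw_feat(fw_features, "enabled", "inst-l1d-flush-trig2") is true
 * when firmware advertises the trig2 form of the L1D flush.
 */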
2294 static inline bool have_fw_feat(struct device_node *fw_features,
2295 				const char *state, const char *name)
2296 {
2297 	struct device_node *np;
2298 	bool r = false;
2299 
2300 	np = of_get_child_by_name(fw_features, name);
2301 	if (np) {
2302 		r = of_property_read_bool(np, state);
2303 		of_node_put(np);
2304 	}
2305 	return r;
2306 }
2307 
2308 static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2309 {
2310 	struct device_node *np, *fw_features;
2311 	int r;
2312 
2313 	memset(cp, 0, sizeof(*cp));
2314 	r = pseries_get_cpu_char(cp);
2315 	if (r != -ENOTTY)
2316 		return r;
2317 
2318 	np = of_find_node_by_name(NULL, "ibm,opal");
2319 	if (np) {
2320 		fw_features = of_get_child_by_name(np, "fw-features");
2321 		of_node_put(np);
2322 		if (!fw_features)
2323 			return 0;
2324 		if (have_fw_feat(fw_features, "enabled",
2325 				 "inst-spec-barrier-ori31,31,0"))
2326 			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
2327 		if (have_fw_feat(fw_features, "enabled",
2328 				 "fw-bcctrl-serialized"))
2329 			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
2330 		if (have_fw_feat(fw_features, "enabled",
2331 				 "inst-l1d-flush-ori30,30,0"))
2332 			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
2333 		if (have_fw_feat(fw_features, "enabled",
2334 				 "inst-l1d-flush-trig2"))
2335 			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
2336 		if (have_fw_feat(fw_features, "enabled",
2337 				 "fw-l1d-thread-split"))
2338 			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
2339 		if (have_fw_feat(fw_features, "enabled",
2340 				 "fw-count-cache-disabled"))
2341 			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
2342 		if (have_fw_feat(fw_features, "enabled",
2343 				 "fw-count-cache-flush-bcctr2,0,0"))
2344 			cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2345 		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2346 			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2347 			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2348 			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2349 			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2350 			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
2351 			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2352 
2353 		if (have_fw_feat(fw_features, "enabled",
2354 				 "speculation-policy-favor-security"))
2355 			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
2356 		if (!have_fw_feat(fw_features, "disabled",
2357 				  "needs-l1d-flush-msr-pr-0-to-1"))
2358 			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
2359 		if (!have_fw_feat(fw_features, "disabled",
2360 				  "needs-spec-barrier-for-bound-checks"))
2361 			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
2362 		if (have_fw_feat(fw_features, "enabled",
2363 				 "needs-count-cache-flush-on-context-switch"))
2364 			cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2365 		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2366 			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2367 			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
2368 			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2369 
2370 		of_node_put(fw_features);
2371 	}
2372 
2373 	return 0;
2374 }
2375 #endif
2376 
2377 long kvm_arch_vm_ioctl(struct file *filp,
2378                        unsigned int ioctl, unsigned long arg)
2379 {
2380 	struct kvm *kvm __maybe_unused = filp->private_data;
2381 	void __user *argp = (void __user *)arg;
2382 	long r;
2383 
2384 	switch (ioctl) {
2385 	case KVM_PPC_GET_PVINFO: {
2386 		struct kvm_ppc_pvinfo pvinfo;
2387 		memset(&pvinfo, 0, sizeof(pvinfo));
2388 		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
2389 		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
2390 			r = -EFAULT;
2391 			goto out;
2392 		}
2393 
2394 		break;
2395 	}
2396 #ifdef CONFIG_SPAPR_TCE_IOMMU
2397 	case KVM_CREATE_SPAPR_TCE_64: {
2398 		struct kvm_create_spapr_tce_64 create_tce_64;
2399 
2400 		r = -EFAULT;
2401 		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
2402 			goto out;
2403 		if (create_tce_64.flags) {
2404 			r = -EINVAL;
2405 			goto out;
2406 		}
2407 		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2408 		goto out;
2409 	}
2410 	case KVM_CREATE_SPAPR_TCE: {
2411 		struct kvm_create_spapr_tce create_tce;
2412 		struct kvm_create_spapr_tce_64 create_tce_64;
2413 
2414 		r = -EFAULT;
2415 		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
2416 			goto out;
2417 
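		/*
		 * The legacy 32-bit ioctl only knows about a 4K TCE page
		 * size and a window starting at offset 0, so express it in
		 * the 64-bit layout: size is the number of 4K entries,
		 * i.e. window_size >> IOMMU_PAGE_SHIFT_4K.
		 */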
2418 		create_tce_64.liobn = create_tce.liobn;
2419 		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
2420 		create_tce_64.offset = 0;
2421 		create_tce_64.size = create_tce.window_size >>
2422 				IOMMU_PAGE_SHIFT_4K;
2423 		create_tce_64.flags = 0;
2424 		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2425 		goto out;
2426 	}
2427 #endif
2428 #ifdef CONFIG_PPC_BOOK3S_64
2429 	case KVM_PPC_GET_SMMU_INFO: {
2430 		struct kvm_ppc_smmu_info info;
2431 		struct kvm *kvm = filp->private_data;
2432 
2433 		memset(&info, 0, sizeof(info));
2434 		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
2435 		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2436 			r = -EFAULT;
2437 		break;
2438 	}
2439 	case KVM_PPC_RTAS_DEFINE_TOKEN: {
2440 		struct kvm *kvm = filp->private_data;
2441 
2442 		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
2443 		break;
2444 	}
2445 	case KVM_PPC_CONFIGURE_V3_MMU: {
2446 		struct kvm *kvm = filp->private_data;
2447 		struct kvm_ppc_mmuv3_cfg cfg;
2448 
2449 		r = -EINVAL;
2450 		if (!kvm->arch.kvm_ops->configure_mmu)
2451 			goto out;
2452 		r = -EFAULT;
2453 		if (copy_from_user(&cfg, argp, sizeof(cfg)))
2454 			goto out;
2455 		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
2456 		break;
2457 	}
2458 	case KVM_PPC_GET_RMMU_INFO: {
2459 		struct kvm *kvm = filp->private_data;
2460 		struct kvm_ppc_rmmu_info info;
2461 
2462 		r = -EINVAL;
2463 		if (!kvm->arch.kvm_ops->get_rmmu_info)
2464 			goto out;
2465 		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
2466 		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2467 			r = -EFAULT;
2468 		break;
2469 	}
2470 	case KVM_PPC_GET_CPU_CHAR: {
2471 		struct kvm_ppc_cpu_char cpuchar;
2472 
2473 		r = kvmppc_get_cpu_char(&cpuchar);
2474 		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
2475 			r = -EFAULT;
2476 		break;
2477 	}
2478 	case KVM_PPC_SVM_OFF: {
2479 		struct kvm *kvm = filp->private_data;
2480 
2481 		r = 0;
2482 		if (!kvm->arch.kvm_ops->svm_off)
2483 			goto out;
2484 
2485 		r = kvm->arch.kvm_ops->svm_off(kvm);
2486 		break;
2487 	}
2488 	default: {
2489 		struct kvm *kvm = filp->private_data;
2490 		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
2491 	}
2492 #else /* CONFIG_PPC_BOOK3S_64 */
2493 	default:
2494 		r = -ENOTTY;
2495 #endif
2496 	}
2497 out:
2498 	return r;
2499 }
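/*
 * Of the handlers above, KVM_PPC_GET_CPU_CHAR simply copies out whatever
 * kvmppc_get_cpu_char() gathered; a rough sketch of a caller that checks
 * the PR L1D-flush behaviour bit:
 *
 *	struct kvm_ppc_cpu_char cc;
 *	int need_l1d_flush_pr = 0;
 *
 *	if (!ioctl(vm_fd, KVM_PPC_GET_CPU_CHAR, &cc))
 *		need_l1d_flush_pr = !!(cc.behaviour & cc.behaviour_mask &
 *				       KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR);
 */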
2500 
2501 static DEFINE_IDA(lpid_inuse);
2502 static unsigned long nr_lpids;
2503 
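/*
 * Small IDA-based allocator for logical partition IDs.  The
 * hypervisor-capable backends (e.g. Book3S HV, e500mc) are expected to
 * call kvmppc_init_lpid() once with the number of LPIDs the hardware
 * supports, then pair kvmppc_alloc_lpid()/kvmppc_free_lpid() around each
 * guest's lifetime.
 */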
2504 long kvmppc_alloc_lpid(void)
2505 {
2506 	int lpid;
2507 
2508 	/* The host LPID must always be 0 (allocation starts at 1) */
2509 	lpid = ida_alloc_range(&lpid_inuse, 1, nr_lpids - 1, GFP_KERNEL);
2510 	if (lpid < 0) {
2511 		if (lpid == -ENOMEM)
2512 			pr_err("%s: Out of memory\n", __func__);
2513 		else
2514 			pr_err("%s: No LPIDs free\n", __func__);
2515 		return -ENOMEM;
2516 	}
2517 
2518 	return lpid;
2519 }
2520 EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
2521 
2522 void kvmppc_free_lpid(long lpid)
2523 {
2524 	ida_free(&lpid_inuse, lpid);
2525 }
2526 EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
2527 
2528 /* nr_lpids_param includes the host LPID */
2529 void kvmppc_init_lpid(unsigned long nr_lpids_param)
2530 {
2531 	nr_lpids = nr_lpids_param;
2532 }
2533 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
2534 
2535 int kvm_arch_init(void *opaque)
2536 {
2537 	return 0;
2538 }
2539 
2540 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);
2541 
2542 void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
2543 {
2544 	if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs)
2545 		vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry);
2546 }
2547 
2548 int kvm_arch_create_vm_debugfs(struct kvm *kvm)
2549 {
2550 	if (kvm->arch.kvm_ops->create_vm_debugfs)
2551 		kvm->arch.kvm_ops->create_vm_debugfs(kvm);
2552 	return 0;
2553 }
2554