// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif
#include <asm/ultravisor.h>

#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_runnable(vcpu);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world.  Called with interrupts
 * enabled; interrupts are hard-disabled here, and stay disabled if we
 * return 1.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with that value as the return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (kvm_request_pending(vcpu)) {
			/* Make sure we process requests with preemption enabled */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/*
			 * Interrupts got enabled in between, so we are
			 * back at square one.
			 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

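/*
 * For illustration: a minimal sketch of how a sub-arch run loop is expected
 * to consume kvmppc_prepare_to_enter()'s return value.  The exit handler
 * below is hypothetical; the real callers live in booke.c and book3s_pr.c.
 */
#if 0	/* illustrative only, not built */
static int example_vcpu_run(struct kvm_vcpu *vcpu)
{
	int r = RESUME_GUEST;

	while (r == RESUME_GUEST) {
		r = kvmppc_prepare_to_enter(vcpu);
		if (r <= 0)
			break;	/* back to the host with r as return value */

		/* Interrupts are hard-disabled here; enter the guest. */
		r = example_enter_guest_and_handle_exit(vcpu);	/* hypothetical */
	}
	return r;
}
#endif
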
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, so find out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_halt(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

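/*
 * For illustration: how a guest would issue one of the hypercalls handled
 * above.  The token goes in r11, parameters in r3..r6, and the two return
 * values come back in r3 and r4; the sc itself must carry KVM_SC_MAGIC_R0
 * in r0 (this is what the KVM_PPC_GET_PVINFO sequence at the bottom of this
 * file emits).  The wrapper below is a hypothetical sketch; real guests go
 * through epapr_hypercall().
 */
#if 0	/* illustrative only, not built */
static long example_kvm_hypercall(unsigned long token,
				  unsigned long p1, unsigned long p2)
{
	register unsigned long r0 asm("r0") = KVM_SC_MAGIC_R0;
	register unsigned long r11 asm("r11") = token;
	register unsigned long r3 asm("r3") = p1;
	register unsigned long r4 asm("r4") = p2;

	asm volatile("sc" : "+r"(r3), "+r"(r4)
		     : "r"(r0), "r"(r11) : "memory");
	return r3;	/* EV_SUCCESS etc.; the second return value is in r4 */
}
#endif
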
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		kvm_debug_ratelimited("Guest access to device memory using unsupported instruction (opcode: %#08x)\n",
				      last_inst);

		/*
		 * Injecting a Data Storage here is a bit more
		 * accurate since the instruction that caused the
		 * access could still be a valid one.
		 */
		if (!IS_ENABLED(CONFIG_BOOKE)) {
			ulong dsisr = DSISR_BADACCESS;

			if (vcpu->mmio_is_write)
				dsisr |= DSISR_ISSTORE;

			kvmppc_core_queue_data_storage(vcpu, vcpu->arch.vaddr_accessed, dsisr);
		} else {
			/*
			 * BookE does not send a SIGBUS on a bad
			 * fault, so use a Program interrupt instead
			 * to avoid a fault loop.
			 */
			kvmppc_core_queue_program(vcpu, 0);
		}

		r = RESUME_GUEST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r = -EINVAL;

	vcpu->stat.st++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
							    size);

	if ((!r) || (r == -EAGAIN))
		return r;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc = -EINVAL;

	vcpu->stat.ld++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
							      size);

	if ((!rc) || (rc == -EAGAIN))
		return rc;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	if (rc)
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

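/*
 * For illustration: kvmppc_ld()/kvmppc_st() read and write guest memory
 * through the guest's own address translation.  A minimal sketch of
 * fetching one instruction at the guest PC (the wrapper is hypothetical;
 * real users include the instruction emulation paths):
 */
#if 0	/* illustrative only, not built */
static int example_fetch_guest_inst(struct kvm_vcpu *vcpu, u32 *inst)
{
	ulong pc = kvmppc_get_pc(vcpu);
	int rc;

	/* data == false: an instruction fetch, which additionally
	 * requires may_execute on the translated page. */
	rc = kvmppc_ld(vcpu, &pc, sizeof(*inst), inst, false);
	return (rc == EMULATE_DONE) ? 0 : rc;
}
#endif
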
int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void *opaque)
{
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	int r;

	/*
	 * If we have both HV and PR enabled, the default is HV.
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (!try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	r = kvmppc_core_init_vm(kvm);
	if (r)
		module_put(kvm_ops->owner);
	return r;
err_out:
	return -EINVAL;
}

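/*
 * For illustration: the "type" argument above comes straight from the
 * KVM_CREATE_VM ioctl.  A minimal userspace sketch asking for a PR VM
 * explicitly (error handling elided):
 */
#if 0	/* illustrative userspace code, not built here */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int create_pr_vm(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);

	/* type 0 would mean "pick a default": HV if available, else PR. */
	return ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_PPC_PR);
}
#endif
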
void kvm_arch_destroy_vm(struct kvm *kvm)
{
#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_destroy_vcpus(kvm);

	mutex_lock(&kvm->lock);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_SET_GUEST_DEBUG:
		r = 1;
		break;
	case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
		r = 1;
		break;
	case KVM_CAP_SPAPR_TCE_VFIO:
		r = !!cpu_has_feature(CPU_FTR_HVMODE);
		break;
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
		r = 1;
		break;
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE:
		/*
		 * We need XIVE to be enabled on the platform (implies
		 * a POWER9 processor) and the PowerNV platform, as
		 * nested is not yet supported.
		 */
		r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
			kvmppc_xive_native_supported();
		break;
#endif

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				/* P9 can emulate dbells, so allow any mode */
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible &&
		       kvmppc_hv_ops->hash_v3_possible());
		break;
	case KVM_CAP_PPC_NESTED_HV:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
		       !kvmppc_hv_ops->enable_nested(NULL));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = min_t(unsigned int, num_present_cpus(), KVM_MAX_VCPUS);
		else
			r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_IDS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_CAP_PPC_HTM:
		r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
		break;
#endif
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
	case KVM_CAP_PPC_SECURE_GUEST:
		r = hv_enabled && kvmppc_hv_ops->enable_svm &&
			!kvmppc_hv_ops->enable_svm(NULL);
		break;
	case KVM_CAP_PPC_DAWR1:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 &&
		       !kvmppc_hv_ops->enable_dawr1(NULL));
		break;
	case KVM_CAP_PPC_RPT_INVALIDATE:
		r = 1;
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;
}

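/*
 * For illustration: userspace probes these capabilities with the
 * KVM_CHECK_EXTENSION ioctl on the VM (or /dev/kvm) fd.  A minimal sketch
 * (error handling elided):
 */
#if 0	/* illustrative userspace code, not built here */
#include <sys/ioctl.h>
#include <linux/kvm.h>

int vm_supports_radix(int vm_fd)
{
	/* Returns 0 or 1 per the KVM_CAP_PPC_MMU_RADIX case above. */
	return ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_MMU_RADIX);
}
#endif
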
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvmppc_core_free_memslot(kvm, slot);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, old, new, change);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, old, new, change);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = get_tb();

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	err = kvmppc_subarch_vcpu_init(vcpu);
	if (err)
		return err;

	err = kvmppc_core_vcpu_create(vcpu);
	if (err)
		goto out_vcpu_uninit;

	rcuwait_init(&vcpu->arch.wait);
	vcpu->arch.waitp = &vcpu->arch.wait;
	return 0;

out_vcpu_uninit:
	kvmppc_subarch_vcpu_uninit(vcpu);
	return err;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xics_on_xive())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	case KVMPPC_IRQ_XIVE:
		kvmppc_xive_native_cleanup_vcpu(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);

	kvmppc_subarch_vcpu_uninit(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_dword_offset(int index)
{
	int offset;

	if ((index != 0) && (index != 1))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 1 - index;
#endif

	return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
	int offset;

	if ((index > 3) || (index < 0))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 3 - index;
#endif
	return offset;
}

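/*
 * For illustration: the two helpers above map an architected VSX element
 * index onto the kernel's register image, which is laid out in host
 * endianness.  A hypothetical self-check showing the expected mapping:
 */
#if 0	/* illustrative only, not built */
static void example_vsr_offset_check(void)
{
#ifdef __BIG_ENDIAN
	WARN_ON(kvmppc_get_vsr_word_offset(0) != 0);	/* identity on BE */
#else
	WARN_ON(kvmppc_get_vsr_word_offset(0) != 3);	/* mirrored on LE */
	WARN_ON(kvmppc_get_vsr_dword_offset(1) != 0);
#endif
	WARN_ON(kvmppc_get_vsr_word_offset(4) != -1);	/* out of range */
}
#endif
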
static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[offset] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
	}
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[0] = gpr;
		val.vsxval[1] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
	}
}

static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
	u32 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		val.vsx32val[2] = gpr;
		val.vsx32val[3] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
		VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
	}
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	int dword_offset, word_offset;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsx32val[offset] = gpr32;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		dword_offset = offset / 2;
		word_offset = offset % 2;
		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
		val.vsx32val[word_offset] = gpr32;
		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
	}
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
		int index, int element_size)
{
	int offset;
	int elts = sizeof(vector128)/element_size;

	if ((index < 0) || (index >= elts))
		return -1;

	if (kvmppc_need_byteswap(vcpu))
		offset = elts - index - 1;
	else
		offset = index;

	return offset;
}

static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
}

static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
}

static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
}

static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
}

static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_dword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsxval[offset] = gpr;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_word_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx32val[offset] = gpr32;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
	u16 gpr16)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_hword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx16val[offset] = gpr16;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
	u8 gpr8)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_byte_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx8val[offset] = gpr8;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_PPC_FPU
static inline u64 sp_to_dp(u32 fprs)
{
	u64 fprd;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m<>" (fprd) : "m<>" (fprs)
	     : "fr0");
	preempt_enable();
	return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
	u32 fprs;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m<>" (fprs) : "m<>" (fprd)
	     : "fr0");
	preempt_enable();
	return fprs;
}

#else
#define sp_to_dp(x)	(x)
#define dp_to_sp(x)	(x)
#endif /* CONFIG_PPC_FPU */

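/*
 * For illustration: sp_to_dp()/dp_to_sp() above use the FPU itself to
 * convert, because PowerPC FPRs always hold double-precision values.  A
 * portable sketch of the same conversion (assuming IEEE 754 float/double
 * semantics, which is what lfs/stfd implement in the common case):
 */
#if 0	/* illustrative only, not built */
static u64 example_sp_to_dp(u32 fprs)
{
	float f;
	double d;
	u64 fprd;

	memcpy(&f, &fprs, sizeof(f));	/* reinterpret the 32-bit image */
	d = f;				/* every float widens exactly */
	memcpy(&fprd, &d, sizeof(fprd));
	return fprd;
}
#endif
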
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u64 gpr;

	if (run->mmio.len > sizeof(gpr))
		return;

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	/* conversion between single and double precision */
	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
		gpr = sp_to_dp(gpr);

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);

		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
#ifdef CONFIG_VSX
	case KVM_MMIO_REG_VSX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
			kvmppc_set_vsr_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
			kvmppc_set_vsr_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
			kvmppc_set_vsr_dword_dump(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
			kvmppc_set_vsr_word_dump(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case KVM_MMIO_REG_VMX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
			kvmppc_set_vmx_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
			kvmppc_set_vmx_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_HWORD)
			kvmppc_set_vmx_hword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_BYTE)
			kvmppc_set_vmx_byte(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_MMIO_REG_NESTED_GPR:
		if (kvmppc_need_byteswap(vcpu))
			gpr = swab64(gpr);
		kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
				     sizeof(gpr));
		break;
#endif
	default:
		BUG();
	}
}

static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	struct kvm_run *run = vcpu->run;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data))
		return EMULATE_FAIL;

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
}

#ifdef CONFIG_VSX
int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian, int mmio_sign_extend)
{
	enum emulation_result emulated = EMULATE_DONE;

	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		emulated = __kvmppc_handle_load(vcpu, rt, bytes,
			is_default_endian, mmio_sign_extend);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}
	return emulated;
}
#endif /* CONFIG_VSX */

int kvmppc_handle_store(struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	struct kvm_run *run = vcpu->run;
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data))
		return EMULATE_FAIL;

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
		val = dp_to_sp(val);

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8  *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8  *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
	u32 dword_offset, word_offset;
	union kvmppc_one_reg reg;
	int vsx_offset = 0;
	int copy_type = vcpu->arch.mmio_copy_type;
	int result = 0;

	switch (copy_type) {
	case KVMPPC_VSX_COPY_DWORD:
		vsx_offset =
			kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (rs < 32) {
			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
			*val = reg.vsxval[vsx_offset];
		}
		break;

	case KVMPPC_VSX_COPY_WORD:
		vsx_offset =
			kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (rs < 32) {
			dword_offset = vsx_offset / 2;
			word_offset = vsx_offset % 2;
			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
			*val = reg.vsx32val[word_offset];
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
			*val = reg.vsx32val[vsx_offset];
		}
		break;

	default:
		result = -1;
		break;
	}

	return result;
}

int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
			int rs, unsigned int bytes, int is_default_endian)
{
	u64 val;
	enum emulation_result emulated = EMULATE_DONE;

	vcpu->arch.io_gpr = rs;

	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
			return EMULATE_FAIL;

		emulated = kvmppc_handle_store(vcpu,
			 val, bytes, is_default_endian);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}

	return emulated;
}

static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
			 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
	} else {
		emulated = kvmppc_handle_vsx_store(vcpu,
			 vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian)
{
	enum emulation_result emulated = EMULATE_DONE;

	if (vcpu->arch.mmio_vmx_copy_nums > 2)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		emulated = __kvmppc_handle_load(vcpu, rt, bytes,
				is_default_endian, 0);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}

	return emulated;
}

static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsxval[vmx_offset];

	return result;
}

static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx32val[vmx_offset];

	return result;
}

static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx16val[vmx_offset];

	return result;
}

static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx8val[vmx_offset];

	return result;
}

int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian)
{
	u64 val = 0;
	unsigned int index = rs & KVM_MMIO_REG_MASK;
	enum emulation_result emulated = EMULATE_DONE;

	if (vcpu->arch.mmio_vmx_copy_nums > 2)
		return EMULATE_FAIL;

	vcpu->arch.io_gpr = rs;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		switch (vcpu->arch.mmio_copy_type) {
		case KVMPPC_VMX_COPY_DWORD:
			if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;

			break;
		case KVMPPC_VMX_COPY_WORD:
			if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_HWORD:
			if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_BYTE:
			if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		default:
			return EMULATE_FAIL;
		}

		emulated = kvmppc_handle_store(vcpu, val, bytes,
				is_default_endian);
		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}

	return emulated;
}

static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vmx_load(vcpu,
				vcpu->arch.io_gpr, run->mmio.len, 1);
	} else {
		emulated = kvmppc_handle_vmx_store(vcpu,
				vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_ALTIVEC */

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int r;

	vcpu_load(vcpu);

	if (vcpu->mmio_needed) {
		vcpu->mmio_needed = 0;
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu);
#ifdef CONFIG_VSX
		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			vcpu->arch.mmio_vsx_copy_nums--;
			vcpu->arch.mmio_vsx_offset++;
		}

		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
#ifdef CONFIG_ALTIVEC
		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			vcpu->arch.mmio_vmx_copy_nums--;
			vcpu->arch.mmio_vmx_offset++;
		}

		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	kvm_sigset_activate(vcpu);

	if (run->immediate_exit)
		r = -EINTR;
	else
		r = kvmppc_vcpu_run(vcpu);

	kvm_sigset_deactivate(vcpu);

#ifdef CONFIG_ALTIVEC
out:
#endif

	/*
	 * We're already returning to userspace, don't pass the
	 * RESUME_HOST flags along.
	 */
	if (r > 0)
		r = 0;

	vcpu_put(vcpu);
	return r;
}

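/*
 * For illustration: the MMIO completion handled at the top of
 * kvm_arch_vcpu_ioctl_run() pairs with a userspace loop like the sketch
 * below -- userspace services the access described in run->mmio and then
 * re-enters KVM_RUN so the loaded value can be written back.  The device
 * model functions are hypothetical and error handling is elided:
 */
#if 0	/* illustrative userspace code, not built here */
#include <sys/ioctl.h>
#include <linux/kvm.h>

void run_loop(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		ioctl(vcpu_fd, KVM_RUN, 0);
		if (run->exit_reason == KVM_EXIT_MMIO) {
			if (run->mmio.is_write)
				device_write(run->mmio.phys_addr,	/* hypothetical */
					     run->mmio.data, run->mmio.len);
			else
				device_read(run->mmio.phys_addr,	/* hypothetical */
					    run->mmio.data, run->mmio.len);
		}
	}
}
#endif
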
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

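/*
 * For illustration: userspace raises or clears the external interrupt line
 * handled above via the KVM_INTERRUPT vcpu ioctl.  A minimal sketch:
 */
#if 0	/* illustrative userspace code, not built here */
#include <sys/ioctl.h>
#include <linux/kvm.h>

void set_external_irq(int vcpu_fd, int raise)
{
	struct kvm_interrupt irq = {
		.irq = raise ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET,
	};

	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
}
#endif
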
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev) {
			if (xics_on_xive())
				r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
			else
				r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
		}

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -ENXIO;
		if (!xive_enabled()) {
			/* don't leak the file reference on this error path */
			fdput(f);
			break;
		}

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
							    cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XIVE */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(vcpu->kvm))
			break;
		r = 0;
		vcpu->kvm->arch.fwnmi_enabled = true;
		break;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

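/*
 * For illustration: these per-vcpu capabilities are turned on from
 * userspace with KVM_ENABLE_CAP.  A minimal sketch enabling PAPR mode,
 * as used by PR KVM to run pseries guests:
 */
#if 0	/* illustrative userspace code, not built here */
#include <sys/ioctl.h>
#include <linux/kvm.h>

int enable_papr(int vcpu_fd)
{
	struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_PAPR };

	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}
#endif
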
2017 bool kvm_arch_intc_initialized(struct kvm *kvm)
2018 {
2019 #ifdef CONFIG_KVM_MPIC
2020 	if (kvm->arch.mpic)
2021 		return true;
2022 #endif
2023 #ifdef CONFIG_KVM_XICS
2024 	if (kvm->arch.xics || kvm->arch.xive)
2025 		return true;
2026 #endif
2027 	return false;
2028 }
2029 
2030 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2031                                     struct kvm_mp_state *mp_state)
2032 {
2033 	return -EINVAL;
2034 }
2035 
2036 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2037                                     struct kvm_mp_state *mp_state)
2038 {
2039 	return -EINVAL;
2040 }
2041 
2042 long kvm_arch_vcpu_async_ioctl(struct file *filp,
2043 			       unsigned int ioctl, unsigned long arg)
2044 {
2045 	struct kvm_vcpu *vcpu = filp->private_data;
2046 	void __user *argp = (void __user *)arg;
2047 
2048 	if (ioctl == KVM_INTERRUPT) {
2049 		struct kvm_interrupt irq;
2050 		if (copy_from_user(&irq, argp, sizeof(irq)))
2051 			return -EFAULT;
2052 		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2053 	}
2054 	return -ENOIOCTLCMD;
2055 }
2056 
2057 long kvm_arch_vcpu_ioctl(struct file *filp,
2058                          unsigned int ioctl, unsigned long arg)
2059 {
2060 	struct kvm_vcpu *vcpu = filp->private_data;
2061 	void __user *argp = (void __user *)arg;
2062 	long r;
2063 
2064 	switch (ioctl) {
2065 	case KVM_ENABLE_CAP:
2066 	{
2067 		struct kvm_enable_cap cap;
2068 		r = -EFAULT;
2069 		if (copy_from_user(&cap, argp, sizeof(cap)))
2070 			goto out;
2071 		vcpu_load(vcpu);
2072 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2073 		vcpu_put(vcpu);
2074 		break;
2075 	}
2076 
2077 	case KVM_SET_ONE_REG:
2078 	case KVM_GET_ONE_REG:
2079 	{
2080 		struct kvm_one_reg reg;
2081 		r = -EFAULT;
2082 		if (copy_from_user(&reg, argp, sizeof(reg)))
2083 			goto out;
2084 		if (ioctl == KVM_SET_ONE_REG)
2085 			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
2086 		else
2087 			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
2088 		break;
2089 	}
2090 
2091 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
2092 	case KVM_DIRTY_TLB: {
2093 		struct kvm_dirty_tlb dirty;
2094 		r = -EFAULT;
2095 		if (copy_from_user(&dirty, argp, sizeof(dirty)))
2096 			goto out;
2097 		vcpu_load(vcpu);
2098 		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
2099 		vcpu_put(vcpu);
2100 		break;
2101 	}
2102 #endif
2103 	default:
2104 		r = -EINVAL;
2105 	}
2106 
2107 out:
2108 	return r;
2109 }
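
/*
 * Illustrative userspace sketch (not part of this file): the ONE_REG
 * ioctls handled above take a struct kvm_one_reg naming the register
 * and the userspace address of its value; "reg_id" stands for any
 * KVM_REG_PPC_* identifier from <linux/kvm.h>.
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id   = reg_id,
 *		.addr = (__u64)(unsigned long)&val,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
 *		err(1, "KVM_GET_ONE_REG");
 */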
2110 
2111 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2112 {
2113 	return VM_FAULT_SIGBUS;
2114 }
2115 
2116 static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
2117 {
2118 	u32 inst_nop = 0x60000000;
2119 #ifdef CONFIG_KVM_BOOKE_HV
2120 	u32 inst_sc1 = 0x44000022;
2121 	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
2122 	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
2123 	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
2124 	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2125 #else
2126 	u32 inst_lis = 0x3c000000;
2127 	u32 inst_ori = 0x60000000;
2128 	u32 inst_sc = 0x44000002;
2129 	u32 inst_imm_mask = 0xffff;
2130 
2131 	/*
2132 	 * The hypercall to get into KVM from within guest context is as
2133 	 * follows:
2134 	 *
2135 	 *    lis r0, KVM_SC_MAGIC_R0@h
2136 	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
2137 	 *    sc
2138 	 *    nop
2139 	 */
2140 	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
2141 	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
2142 	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
2143 	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2144 #endif
2145 
2146 	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;
2147 
2148 	return 0;
2149 }
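
/*
 * Illustrative userspace sketch (not part of this file): a VMM fetches
 * the hypercall sequence built above and patches it into guest memory
 * at the location the guest expects.  "vm_fd" is assumed to come from
 * KVM_CREATE_VM, and "guest_hcall_page" is a hypothetical pointer into
 * the mapped guest image.
 *
 *	struct kvm_ppc_pvinfo pvinfo;
 *
 *	if (ioctl(vm_fd, KVM_PPC_GET_PVINFO, &pvinfo) < 0)
 *		err(1, "KVM_PPC_GET_PVINFO");
 *	memcpy(guest_hcall_page, pvinfo.hcall, sizeof(pvinfo.hcall));
 */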
2150 
2151 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
2152 			  bool line_status)
2153 {
2154 	if (!irqchip_in_kernel(kvm))
2155 		return -ENXIO;
2156 
2157 	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
2158 					irq_event->irq, irq_event->level,
2159 					line_status);
2160 	return 0;
2161 }
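
/*
 * Illustrative userspace sketch (not part of this file): with an
 * in-kernel irqchip present, KVM_IRQ_LINE drives an interrupt source
 * through kvm_set_irq() as above; "pin" stands for the line to assert.
 *
 *	struct kvm_irq_level event = { .irq = pin, .level = 1 };
 *
 *	if (ioctl(vm_fd, KVM_IRQ_LINE, &event) < 0)
 *		err(1, "KVM_IRQ_LINE");
 */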
2162 
2163 
2164 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
2165 			    struct kvm_enable_cap *cap)
2166 {
2167 	int r;
2168 
2169 	if (cap->flags)
2170 		return -EINVAL;
2171 
2172 	switch (cap->cap) {
2173 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
2174 	case KVM_CAP_PPC_ENABLE_HCALL: {
2175 		unsigned long hcall = cap->args[0];
2176 
2177 		r = -EINVAL;
2178 		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
2179 		    cap->args[1] > 1)
2180 			break;
2181 		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
2182 			break;
2183 		if (cap->args[1])
2184 			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
2185 		else
2186 			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
2187 		r = 0;
2188 		break;
2189 	}
2190 	case KVM_CAP_PPC_SMT: {
2191 		unsigned long mode = cap->args[0];
2192 		unsigned long flags = cap->args[1];
2193 
2194 		r = -EINVAL;
2195 		if (kvm->arch.kvm_ops->set_smt_mode)
2196 			r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
2197 		break;
2198 	}
2199 
2200 	case KVM_CAP_PPC_NESTED_HV:
2201 		r = -EINVAL;
2202 		if (!is_kvmppc_hv_enabled(kvm) ||
2203 		    !kvm->arch.kvm_ops->enable_nested)
2204 			break;
2205 		r = kvm->arch.kvm_ops->enable_nested(kvm);
2206 		break;
2207 #endif
2208 #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
2209 	case KVM_CAP_PPC_SECURE_GUEST:
2210 		r = -EINVAL;
2211 		if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
2212 			break;
2213 		r = kvm->arch.kvm_ops->enable_svm(kvm);
2214 		break;
2215 	case KVM_CAP_PPC_DAWR1:
2216 		r = -EINVAL;
2217 		if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1)
2218 			break;
2219 		r = kvm->arch.kvm_ops->enable_dawr1(kvm);
2220 		break;
2221 #endif
2222 	default:
2223 		r = -EINVAL;
2224 		break;
2225 	}
2226 
2227 	return r;
2228 }
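
/*
 * Illustrative userspace sketch (not part of this file): enabling one
 * hypercall with KVM_CAP_PPC_ENABLE_HCALL.  Per the checks above,
 * args[0] is the hcall opcode (a multiple of 4, at most
 * MAX_HCALL_OPCODE) and args[1] is 1 to enable or 0 to disable;
 * H_RTAS is just an example opcode.
 *
 *	struct kvm_enable_cap cap = {
 *		.cap     = KVM_CAP_PPC_ENABLE_HCALL,
 *		.args[0] = H_RTAS,
 *		.args[1] = 1,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		err(1, "KVM_ENABLE_CAP");
 */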
2229 
2230 #ifdef CONFIG_PPC_BOOK3S_64
2231 /*
2232  * These functions check whether the underlying hardware is safe
2233  * against attacks based on observing the effects of speculatively
2234  * executed instructions, and whether it supplies instructions for
2235  * use in workarounds.  The information comes from firmware, either
2236  * via the device tree on powernv platforms or from an hcall on
2237  * pseries platforms.
2238  */
2239 #ifdef CONFIG_PPC_PSERIES
2240 static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2241 {
2242 	struct h_cpu_char_result c;
2243 	unsigned long rc;
2244 
2245 	if (!machine_is(pseries))
2246 		return -ENOTTY;
2247 
2248 	rc = plpar_get_cpu_characteristics(&c);
2249 	if (rc == H_SUCCESS) {
2250 		cp->character = c.character;
2251 		cp->behaviour = c.behaviour;
2252 		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2253 			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2254 			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2255 			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2256 			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2257 			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
2258 			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
2259 			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
2260 			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2261 		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2262 			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2263 			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
2264 			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2265 	}
2266 	return 0;
2267 }
2268 #else
2269 static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2270 {
2271 	return -ENOTTY;
2272 }
2273 #endif
2274 
2275 static inline bool have_fw_feat(struct device_node *fw_features,
2276 				const char *state, const char *name)
2277 {
2278 	struct device_node *np;
2279 	bool r = false;
2280 
2281 	np = of_get_child_by_name(fw_features, name);
2282 	if (np) {
2283 		r = of_property_read_bool(np, state);
2284 		of_node_put(np);
2285 	}
2286 	return r;
2287 }
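
/*
 * The fw-features layout probed by have_fw_feat() is one child node
 * per feature, carrying an empty "enabled" or "disabled" property.
 * An illustrative (hypothetical) device tree fragment:
 *
 *	ibm,opal {
 *		fw-features {
 *			inst-spec-barrier-ori31,31,0 {
 *				enabled;
 *			};
 *			fw-count-cache-disabled {
 *				disabled;
 *			};
 *		};
 *	};
 */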
2288 
2289 static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2290 {
2291 	struct device_node *np, *fw_features;
2292 	int r;
2293 
2294 	memset(cp, 0, sizeof(*cp));
2295 	r = pseries_get_cpu_char(cp);
2296 	if (r != -ENOTTY)
2297 		return r;
2298 
2299 	np = of_find_node_by_name(NULL, "ibm,opal");
2300 	if (np) {
2301 		fw_features = of_get_child_by_name(np, "fw-features");
2302 		of_node_put(np);
2303 		if (!fw_features)
2304 			return 0;
2305 		if (have_fw_feat(fw_features, "enabled",
2306 				 "inst-spec-barrier-ori31,31,0"))
2307 			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
2308 		if (have_fw_feat(fw_features, "enabled",
2309 				 "fw-bcctrl-serialized"))
2310 			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
2311 		if (have_fw_feat(fw_features, "enabled",
2312 				 "inst-l1d-flush-ori30,30,0"))
2313 			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
2314 		if (have_fw_feat(fw_features, "enabled",
2315 				 "inst-l1d-flush-trig2"))
2316 			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
2317 		if (have_fw_feat(fw_features, "enabled",
2318 				 "fw-l1d-thread-split"))
2319 			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
2320 		if (have_fw_feat(fw_features, "enabled",
2321 				 "fw-count-cache-disabled"))
2322 			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
2323 		if (have_fw_feat(fw_features, "enabled",
2324 				 "fw-count-cache-flush-bcctr2,0,0"))
2325 			cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2326 		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2327 			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2328 			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2329 			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2330 			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2331 			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
2332 			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2333 
2334 		if (have_fw_feat(fw_features, "enabled",
2335 				 "speculation-policy-favor-security"))
2336 			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
2337 		if (!have_fw_feat(fw_features, "disabled",
2338 				  "needs-l1d-flush-msr-pr-0-to-1"))
2339 			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
2340 		if (!have_fw_feat(fw_features, "disabled",
2341 				  "needs-spec-barrier-for-bound-checks"))
2342 			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
2343 		if (have_fw_feat(fw_features, "enabled",
2344 				 "needs-count-cache-flush-on-context-switch"))
2345 			cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2346 		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2347 			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2348 			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
2349 			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2350 
2351 		of_node_put(fw_features);
2352 	}
2353 
2354 	return 0;
2355 }
2356 #endif
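
/*
 * Illustrative userspace sketch (not part of this file): querying the
 * characteristics gathered above.  A character/behaviour bit is only
 * meaningful where the corresponding *_mask bit is also set.
 *
 *	struct kvm_ppc_cpu_char cc;
 *
 *	if (ioctl(vm_fd, KVM_PPC_GET_CPU_CHAR, &cc) < 0)
 *		err(1, "KVM_PPC_GET_CPU_CHAR");
 *	if (cc.behaviour & cc.behaviour_mask &
 *	    KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR)
 *		... tell the guest to flush L1D when exiting its kernel ...
 */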
2357 
2358 long kvm_arch_vm_ioctl(struct file *filp,
2359                        unsigned int ioctl, unsigned long arg)
2360 {
2361 	struct kvm *kvm __maybe_unused = filp->private_data;
2362 	void __user *argp = (void __user *)arg;
2363 	long r;
2364 
2365 	switch (ioctl) {
2366 	case KVM_PPC_GET_PVINFO: {
2367 		struct kvm_ppc_pvinfo pvinfo;
2368 		memset(&pvinfo, 0, sizeof(pvinfo));
2369 		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
2370 		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
2371 			r = -EFAULT;
2372 			goto out;
2373 		}
2374 
2375 		break;
2376 	}
2377 #ifdef CONFIG_SPAPR_TCE_IOMMU
2378 	case KVM_CREATE_SPAPR_TCE_64: {
2379 		struct kvm_create_spapr_tce_64 create_tce_64;
2380 
2381 		r = -EFAULT;
2382 		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
2383 			goto out;
2384 		if (create_tce_64.flags) {
2385 			r = -EINVAL;
2386 			goto out;
2387 		}
2388 		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2389 		goto out;
2390 	}
2391 	case KVM_CREATE_SPAPR_TCE: {
2392 		struct kvm_create_spapr_tce create_tce;
2393 		struct kvm_create_spapr_tce_64 create_tce_64;
2394 
2395 		r = -EFAULT;
2396 		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
2397 			goto out;
2398 
2399 		create_tce_64.liobn = create_tce.liobn;
2400 		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
2401 		create_tce_64.offset = 0;
2402 		create_tce_64.size = create_tce.window_size >>
2403 				IOMMU_PAGE_SHIFT_4K;
2404 		create_tce_64.flags = 0;
2405 		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2406 		goto out;
2407 	}
2408 #endif
2409 #ifdef CONFIG_PPC_BOOK3S_64
2410 	case KVM_PPC_GET_SMMU_INFO: {
2411 		struct kvm_ppc_smmu_info info;
2412 		struct kvm *kvm = filp->private_data;
2413 
2414 		memset(&info, 0, sizeof(info));
2415 		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
2416 		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2417 			r = -EFAULT;
2418 		break;
2419 	}
2420 	case KVM_PPC_RTAS_DEFINE_TOKEN: {
2421 		struct kvm *kvm = filp->private_data;
2422 
2423 		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
2424 		break;
2425 	}
2426 	case KVM_PPC_CONFIGURE_V3_MMU: {
2427 		struct kvm *kvm = filp->private_data;
2428 		struct kvm_ppc_mmuv3_cfg cfg;
2429 
2430 		r = -EINVAL;
2431 		if (!kvm->arch.kvm_ops->configure_mmu)
2432 			goto out;
2433 		r = -EFAULT;
2434 		if (copy_from_user(&cfg, argp, sizeof(cfg)))
2435 			goto out;
2436 		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
2437 		break;
2438 	}
2439 	case KVM_PPC_GET_RMMU_INFO: {
2440 		struct kvm *kvm = filp->private_data;
2441 		struct kvm_ppc_rmmu_info info;
2442 
2443 		r = -EINVAL;
2444 		if (!kvm->arch.kvm_ops->get_rmmu_info)
2445 			goto out;
2446 		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
2447 		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2448 			r = -EFAULT;
2449 		break;
2450 	}
2451 	case KVM_PPC_GET_CPU_CHAR: {
2452 		struct kvm_ppc_cpu_char cpuchar;
2453 
2454 		r = kvmppc_get_cpu_char(&cpuchar);
2455 		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
2456 			r = -EFAULT;
2457 		break;
2458 	}
2459 	case KVM_PPC_SVM_OFF: {
2460 		struct kvm *kvm = filp->private_data;
2461 
2462 		r = 0;
2463 		if (!kvm->arch.kvm_ops->svm_off)
2464 			goto out;
2465 
2466 		r = kvm->arch.kvm_ops->svm_off(kvm);
2467 		break;
2468 	}
2469 	default: {
2470 		struct kvm *kvm = filp->private_data;
2471 		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
2472 	}
2473 #else /* CONFIG_PPC_BOOK3S_64 */
2474 	default:
2475 		r = -ENOTTY;
2476 #endif
2477 	}
2478 out:
2479 	return r;
2480 }
2481 
2482 static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
2483 static unsigned long nr_lpids;
2484 
2485 long kvmppc_alloc_lpid(void)
2486 {
2487 	long lpid;
2488 
2489 	do {
2490 		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
2491 		if (lpid >= nr_lpids) {
2492 			pr_err("%s: No LPIDs free\n", __func__);
2493 			return -ENOMEM;
2494 		}
2495 	} while (test_and_set_bit(lpid, lpid_inuse));
2496 
2497 	return lpid;
2498 }
2499 EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
2500 
2501 void kvmppc_claim_lpid(long lpid)
2502 {
2503 	set_bit(lpid, lpid_inuse);
2504 }
2505 EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);
2506 
2507 void kvmppc_free_lpid(long lpid)
2508 {
2509 	clear_bit(lpid, lpid_inuse);
2510 }
2511 EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
2512 
2513 void kvmppc_init_lpid(unsigned long nr_lpids_param)
2514 {
2515 	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
2516 	memset(lpid_inuse, 0, sizeof(lpid_inuse));
2517 }
2518 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
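
/*
 * A minimal sketch of the LPID allocator lifecycle above, with the
 * LPID count ("nr") assumed to come from the platform backend:
 *
 *	kvmppc_init_lpid(nr);			(once, at backend init)
 *	long lpid = kvmppc_alloc_lpid();	(grab a free LPID)
 *	if (lpid < 0)
 *		return lpid;			(-ENOMEM: none free)
 *	...
 *	kvmppc_free_lpid(lpid);			(return it to the bitmap)
 */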
2519 
2520 int kvm_arch_init(void *opaque)
2521 {
2522 	return 0;
2523 }
2524 
2525 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);
2526 
2527 void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
2528 {
2529 	if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs)
2530 		vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry);
2531 }
2532 
2533 int kvm_arch_create_vm_debugfs(struct kvm *kvm)
2534 {
2535 	if (kvm->arch.kvm_ops->create_vm_debugfs)
2536 		kvm->arch.kvm_ops->create_vm_debugfs(kvm);
2537 	return 0;
2538 }
2539