/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled; interrupts are hard-disabled here (note the WARN_ON below)
 * and stay disabled if we return 1.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests while preemptible */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/*
			 * Interrupts got enabled in between, so we
			 * are back at square 1.
			 */
			continue;
		}

		__kvm_guest_enter();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
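
/*
 * Illustrative only (not part of the original file): a hedged sketch of
 * how a subarch exit handler is expected to use the helper above, based
 * on the booke caller; the RESUME_* plumbing around it is an assumption:
 *
 *	if (!(r & RESUME_HOST)) {
 *		s = kvmppc_prepare_to_enter(vcpu);
 *		if (s <= 0)
 *			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
 *		else
 *			...interrupts stay hard-disabled: reenter the guest...
 *	}
 */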

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		bool shared_big_endian = true;

		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
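
/*
 * For reference, the register convention consumed above: the guest puts
 * the hypercall token in r11 and up to four arguments in r3..r6, then
 * traps into the hypervisor.  The second return value (r2 above) goes
 * back in r4; the status is placed in r3 by the subarch callers of this
 * function.  A hedged guest-side sketch (TOKEN/ARG1 are placeholders):
 *
 *	lis	r11, TOKEN@h
 *	ori	r11, r11, TOKEN@l
 *	lis	r3, ARG1@h		first argument, and so on
 *	sc				see kvm_vm_ioctl_get_pvinfo() for
 *					the patched entry sequence
 */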

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	bool r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/*
		 * Future optimization: only reload non-volatiles if they were
		 * actually modified.
		 */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/*
		 * We must reload nonvolatiles because "update" load/store
		 * instructions modify register state.
		 * Future optimization: only reload non-volatiles if they were
		 * actually modified.
		 */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
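
/*
 * A hedged userspace sketch of the other half of EMULATE_DO_MMIO: when
 * KVM_RUN returns with exit_reason == KVM_EXIT_MMIO, the VMM services
 * the access described in run->mmio and reenters.  The device_read/
 * device_write helpers are hypothetical VMM code:
 *
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *	if (run->exit_reason == KVM_EXIT_MMIO) {
 *		if (run->mmio.is_write)
 *			device_write(run->mmio.phys_addr,
 *				     run->mmio.data, run->mmio.len);
 *		else
 *			device_read(run->mmio.phys_addr,
 *				    run->mmio.data, run->mmio.len);
 *	}
 */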

int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	vcpu->stat.st++;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	vcpu->stat.ld++;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);
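
/*
 * A minimal usage sketch for the helper above (the caller is
 * hypothetical): fetching one instruction word through the guest MMU,
 * with data=false so execute permission is checked:
 *
 *	u32 inst;
 *	ulong ea = kvmppc_get_pc(vcpu);
 *
 *	if (kvmppc_ld(vcpu, &ea, sizeof(inst), &inst, false) != EMULATE_DONE)
 *		...fall back to MMIO emulation or inject a fault...
 */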

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else {
		goto err_out;
	}

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}
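
/*
 * Userspace picks the implementation through the KVM_CREATE_VM type
 * argument handled above.  A hedged sketch (error handling omitted):
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int vm_fd  = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_PPC_HV);
 *
 * Passing KVM_VM_PPC_PR forces the PR implementation; passing 0 takes
 * HV when the HV module is loaded and falls back to PR otherwise.
 */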

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MMIO
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
	case KVM_CAP_PPC_ALLOC_HTAB:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		if (hv_enabled)
			r = threads_per_subcore;
		else
			r = 0;
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;
}
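
/*
 * The capabilities above are probed from userspace with
 * KVM_CHECK_EXTENSION on the /dev/kvm or VM file descriptor.  A hedged
 * sketch:
 *
 *	int threads = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SMT);
 *
 * On HV this returns threads_per_subcore, as implemented above; it is
 * 0 otherwise.
 */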

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;

	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, bytes);
		return EMULATE_FAIL;
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, bytes);
		return EMULATE_FAIL;
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8  *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8  *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);
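
/*
 * Worked example of the layout rule above: a 4-byte guest store of
 * 0x12345678 leaves run->mmio.data[0..3] = 12 34 56 78 on a big-endian
 * host when no swab is needed, and 78 56 34 12 when host_swabbed is set
 * (i.e. the access's endianness differs from the host's).
 */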

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}
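
/*
 * A hedged userspace sketch of the ONE_REG interface implemented above;
 * the transfer size is encoded in the register id (see one_reg_size()):
 *
 *	__u64 vrsave;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_VRSAVE,
 *		.addr = (__u64)(uintptr_t)&vrsave,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */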

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}
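
/*
 * Illustrative only: enabling PAPR mode on a vcpu from userspace through
 * the handler above.
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_PAPR };
 *
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 *
 * Note the trailing kvmppc_sanity_check() above: a capability the
 * vcpu/VM combination cannot support is rejected here, not at run time.
 */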

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;

		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}
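
/*
 * A hedged sketch of the consumer side: userspace fetches this block
 * with the KVM_PPC_GET_PVINFO VM ioctl (dispatched in kvm_arch_vm_ioctl()
 * below) and patches the four instructions into the guest's hypercall
 * page; hypercall_page is a placeholder for that destination.
 *
 *	struct kvm_ppc_pvinfo pvinfo;
 *
 *	ioctl(vm_fd, KVM_PPC_GET_PVINFO, &pvinfo);
 *	memcpy(hypercall_page, pvinfo.hcall, sizeof(pvinfo.hcall));
 */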

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
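
/*
 * Illustrative only: userspace toggling a single in-kernel hypercall for
 * the guest via the handler above (H_RTAS is just an example token):
 *
 *	struct kvm_enable_cap cap = {
 *		.cap     = KVM_CAP_PPC_ENABLE_HCALL,
 *		.args[0] = H_RTAS,
 *		.args[1] = 1,		1 = enable, 0 = disable
 *	};
 *
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */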

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;

		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;

		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
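
/*
 * Typical pairing for the LPID allocator above (a hedged sketch): a
 * subarch module calls kvmppc_init_lpid() once with the number of LPIDs
 * the hardware supports, then brackets each guest's lifetime with:
 *
 *	long lpid = kvmppc_alloc_lpid();
 *
 *	if (lpid < 0)
 *		return lpid;
 *	...
 *	kvmppc_free_lpid(lpid);
 */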

int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);
1457